1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include "lpfc_version.h"
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_nvme.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52 #include "lpfc_debugfs.h"
53
54 /* NVME initiator-based functions */
55
56 static struct lpfc_io_buf *
57 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
58 int idx, int expedite);
59
60 static void
61 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
62
63 static struct nvme_fc_port_template lpfc_nvme_template;
64
/**
 * lpfc_nvme_create_queue - Allocate and bind a qhandle for an NVME IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
81 static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
83 unsigned int qidx, u16 qsize,
84 void **handle)
85 {
86 struct lpfc_nvme_lport *lport;
87 struct lpfc_vport *vport;
88 struct lpfc_nvme_qhandle *qhandle;
89 char *str;
90
91 if (!pnvme_lport->private)
92 return -ENOMEM;
93
94 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
95 vport = lport->vport;
96 qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
97 if (qhandle == NULL)
98 return -ENOMEM;
99
100 qhandle->cpu_id = raw_smp_processor_id();
101 qhandle->qidx = qidx;
102 /*
103 * NVME qidx == 0 is the admin queue, so both admin queue
104 * and first IO queue will use MSI-X vector and associated
105 * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
106 */
107 if (qidx) {
108 str = "IO "; /* IO queue */
109 qhandle->index = ((qidx - 1) %
110 lpfc_nvme_template.max_hw_queues);
111 } else {
112 str = "ADM"; /* Admin queue */
113 qhandle->index = qidx;
114 }
115
116 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
117 "6073 Binding %s HdwQueue %d (cpu %d) to "
118 "hdw_queue %d qhandle x%px\n", str,
119 qidx, qhandle->cpu_id, qhandle->index, qhandle);
120 *handle = (void *)qhandle;
121 return 0;
122 }
123
/**
 * lpfc_nvme_delete_queue - Free the qhandle bound to an NVME IO queue
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * that bind the @qidx to its internal IO queues.
 *
 * Return value :
 *   None
 **/
138 static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
140 unsigned int qidx,
141 void *handle)
142 {
143 struct lpfc_nvme_lport *lport;
144 struct lpfc_vport *vport;
145
146 if (!pnvme_lport->private)
147 return;
148
149 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
150 vport = lport->vport;
151
152 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
153 "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
154 lport, qidx, handle);
155 kfree(handle);
156 }
157
158 static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
160 {
161 struct lpfc_nvme_lport *lport = localport->private;
162
163 lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
164 "6173 localport x%px delete complete\n",
165 lport);
166
167 /* release any threads waiting for the unreg to complete */
168 if (lport->vport->localport)
169 complete(lport->lport_unreg_cmp);
170 }
171
172 /* lpfc_nvme_remoteport_delete
173 *
174 * @remoteport: Pointer to an nvme transport remoteport instance.
175 *
176 * This is a template downcall. NVME transport calls this function
177 * when it has completed the unregistration of a previously
178 * registered remoteport.
179 *
180 * Return value :
181 * None
182 */
183 static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
185 {
186 struct lpfc_nvme_rport *rport = remoteport->private;
187 struct lpfc_vport *vport;
188 struct lpfc_nodelist *ndlp;
189 u32 fc4_xpt_flags;
190
191 ndlp = rport->ndlp;
192 if (!ndlp) {
193 pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
194 __func__, rport, remoteport);
195 goto rport_err;
196 }
197
198 vport = ndlp->vport;
199 if (!vport) {
200 pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
201 __func__, ndlp, ndlp->nlp_state, rport);
202 goto rport_err;
203 }
204
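	/* Registration flags that must all be clear before the node can be
	 * removed at the end of this routine.
	 */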
205 fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;
206
207 /* Remove this rport from the lport's list - memory is owned by the
208 * transport. Remove the ndlp reference for the NVME transport before
209 * calling state machine to remove the node.
210 */
211 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
212 "6146 remoteport delete of remoteport x%px, ndlp x%px "
213 "DID x%x xflags x%x\n",
214 remoteport, ndlp, ndlp->nlp_DID, ndlp->fc4_xpt_flags);
215 spin_lock_irq(&ndlp->lock);
216
217 /* The register rebind might have occurred before the delete
218 * downcall. Guard against this race.
219 */
220 if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT)
221 ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD);
222
223 spin_unlock_irq(&ndlp->lock);
224
225 /* On a devloss timeout event, one more put is executed provided the
226 * NVME and SCSI rport unregister requests are complete. If the vport
227 * is unloading, this extra put is executed by lpfc_drop_node.
228 */
229 if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
230 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
231
232 rport_err:
233 return;
234 }
235
236 /**
237 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
238 * @phba: pointer to lpfc hba data structure.
239 * @axchg: pointer to exchange context for the NVME LS request
240 *
 * This routine is used for processing an asynchronously received NVME LS
242 * request. Any remaining validation is done and the LS is then forwarded
243 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
244 *
245 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
246 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
247 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
248 *
249 * Returns 0 if LS was handled and delivered to the transport
250 * Returns 1 if LS failed to be handled and should be dropped
251 */
252 int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
254 struct lpfc_async_xchg_ctx *axchg)
255 {
256 #if (IS_ENABLED(CONFIG_NVME_FC))
257 struct lpfc_vport *vport;
258 struct lpfc_nvme_rport *lpfc_rport;
259 struct nvme_fc_remote_port *remoteport;
260 struct lpfc_nvme_lport *lport;
261 uint32_t *payload = axchg->payload;
262 int rc;
263
264 vport = axchg->ndlp->vport;
265 lpfc_rport = axchg->ndlp->nrport;
266 if (!lpfc_rport)
267 return -EINVAL;
268
269 remoteport = lpfc_rport->remoteport;
270 if (!vport->localport)
271 return -EINVAL;
272
273 lport = vport->localport->private;
274 if (!lport)
275 return -EINVAL;
276
277 rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
278 axchg->size);
279
280 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
281 "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
282 "%08x %08x %08x\n",
283 axchg->size, rc,
284 *payload, *(payload+1), *(payload+2),
285 *(payload+3), *(payload+4), *(payload+5));
286
287 if (!rc)
288 return 0;
289 #endif
290 return 1;
291 }
292
293 /**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
295 * LS request.
296 * @phba: Pointer to HBA context object
297 * @vport: The local port that issued the LS
298 * @cmdwqe: Pointer to driver command WQE object.
299 * @wcqe: Pointer to driver response CQE object.
300 *
301 * This function is the generic completion handler for NVME LS requests.
302 * The function updates any states and statistics, calls the transport
303 * ls_req done() routine, then tears down the command and buffers used
304 * for the LS request.
305 **/
306 void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
308 struct lpfc_iocbq *cmdwqe,
309 struct lpfc_wcqe_complete *wcqe)
310 {
311 struct nvmefc_ls_req *pnvme_lsreq;
312 struct lpfc_dmabuf *buf_ptr;
313 struct lpfc_nodelist *ndlp;
314 uint32_t status;
315
316 pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
317 ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
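	/* Mask the raw WCQE status down to the IOCB-compatible IOSTAT range */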
318 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
319
320 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
321 "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
322 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
323 "ndlp:x%px\n",
324 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
325 cmdwqe->sli4_xritag, status,
326 (wcqe->parameter & 0xffff),
327 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
328
329 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
330 cmdwqe->sli4_xritag, status, wcqe->parameter);
331
332 if (cmdwqe->context3) {
333 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
334 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
335 kfree(buf_ptr);
336 cmdwqe->context3 = NULL;
337 }
338 if (pnvme_lsreq->done)
339 pnvme_lsreq->done(pnvme_lsreq, status);
340 else
341 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
342 "6046 NVMEx cmpl without done call back? "
343 "Data x%px DID %x Xri: %x status %x\n",
344 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
345 cmdwqe->sli4_xritag, status);
346 if (ndlp) {
347 lpfc_nlp_put(ndlp);
348 cmdwqe->context1 = NULL;
349 }
350 lpfc_sli_release_iocbq(phba, cmdwqe);
351 }
352
353 static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
355 struct lpfc_wcqe_complete *wcqe)
356 {
357 struct lpfc_vport *vport = cmdwqe->vport;
358 struct lpfc_nvme_lport *lport;
359 uint32_t status;
360
361 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
362
363 if (vport->localport) {
364 lport = (struct lpfc_nvme_lport *)vport->localport->private;
365 if (lport) {
366 atomic_inc(&lport->fc4NvmeLsCmpls);
367 if (status) {
368 if (bf_get(lpfc_wcqe_c_xb, wcqe))
369 atomic_inc(&lport->cmpl_ls_xb);
370 atomic_inc(&lport->cmpl_ls_err);
371 }
372 }
373 }
374
375 __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
376 }
377
378 static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
380 struct lpfc_dmabuf *inp,
381 struct nvmefc_ls_req *pnvme_lsreq,
382 void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
383 struct lpfc_wcqe_complete *),
384 struct lpfc_nodelist *ndlp, uint32_t num_entry,
385 uint32_t tmo, uint8_t retry)
386 {
387 struct lpfc_hba *phba = vport->phba;
388 union lpfc_wqe128 *wqe;
389 struct lpfc_iocbq *genwqe;
390 struct ulp_bde64 *bpl;
391 struct ulp_bde64 bde;
392 int i, rc, xmit_len, first_len;
393
394 /* Allocate buffer for command WQE */
395 genwqe = lpfc_sli_get_iocbq(phba);
396 if (genwqe == NULL)
397 return 1;
398
399 wqe = &genwqe->wqe;
400 /* Initialize only 64 bytes */
401 memset(wqe, 0, sizeof(union lpfc_wqe));
402
403 genwqe->context3 = (uint8_t *)bmp;
404 genwqe->iocb_flag |= LPFC_IO_NVME_LS;
405
406 /* Save for completion so we can release these resources */
407 genwqe->context1 = lpfc_nlp_get(ndlp);
408 if (!genwqe->context1) {
409 dev_warn(&phba->pcidev->dev,
410 "Warning: Failed node ref, not sending LS_REQ\n");
411 lpfc_sli_release_iocbq(phba, genwqe);
412 return 1;
413 }
414
415 genwqe->context2 = (uint8_t *)pnvme_lsreq;
416 /* Fill in payload, bp points to frame payload */
417
418 if (!tmo)
419 /* FC spec states we need 3 * ratov for CT requests */
420 tmo = (3 * phba->fc_ratov);
421
422 /* For this command calculate the xmit length of the request bde. */
423 xmit_len = 0;
424 first_len = 0;
425 bpl = (struct ulp_bde64 *)bmp->virt;
426 for (i = 0; i < num_entry; i++) {
427 bde.tus.w = bpl[i].tus.w;
428 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
429 break;
430 xmit_len += bde.tus.f.bdeSize;
431 if (i == 0)
432 first_len = xmit_len;
433 }
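	/* first_len (the first BDE's size) is programmed into the inline BDE
	 * in WQE words 0-2 and as the request_payload_len below.
	 */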
434
435 genwqe->rsvd2 = num_entry;
436 genwqe->hba_wqidx = 0;
437
438 /* Words 0 - 2 */
439 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
440 wqe->generic.bde.tus.f.bdeSize = first_len;
441 wqe->generic.bde.addrLow = bpl[0].addrLow;
442 wqe->generic.bde.addrHigh = bpl[0].addrHigh;
443
444 /* Word 3 */
445 wqe->gen_req.request_payload_len = first_len;
446
447 /* Word 4 */
448
449 /* Word 5 */
450 bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
451 bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
452 bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
453 bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
454 bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
455
456 /* Word 6 */
457 bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
458 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
459 bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
460
461 /* Word 7 */
462 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
463 bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
464 bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
465 bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
466
467 /* Word 8 */
468 wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
469
470 /* Word 9 */
471 bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
472
473 /* Word 10 */
474 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
475 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
476 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
477 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
478 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
479
480 /* Word 11 */
481 bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
482 bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
483
484
485 /* Issue GEN REQ WQE for NPORT <did> */
486 genwqe->wqe_cmpl = cmpl;
487 genwqe->iocb_cmpl = NULL;
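	/* Pad the driver timeout beyond the WQE timeout so the firmware has
	 * a chance to time out the exchange first.
	 */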
488 genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
489 genwqe->vport = vport;
490 genwqe->retry = retry;
491
492 lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
493 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
494
495 rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
496 if (rc) {
497 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
498 "6045 Issue GEN REQ WQE to NPORT x%x "
499 "Data: x%x x%x rc x%x\n",
500 ndlp->nlp_DID, genwqe->iotag,
501 vport->port_state, rc);
502 lpfc_nlp_put(ndlp);
503 lpfc_sli_release_iocbq(phba, genwqe);
504 return 1;
505 }
506
507 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
508 "6050 Issue GEN REQ WQE to NPORT x%x "
509 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
510 "bmp:x%px xmit:%d 1st:%d\n",
511 ndlp->nlp_DID, genwqe->sli4_xritag,
512 vport->port_state,
513 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
514 return 0;
515 }
516
517
518 /**
519 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
520 * @vport: The local port issuing the LS
521 * @ndlp: The remote port to send the LS to
522 * @pnvme_lsreq: Pointer to LS request structure from the transport
523 * @gen_req_cmp: Completion call-back
524 *
525 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
526 * WQE to perform the LS operation.
527 *
528 * Return value :
529 * 0 - Success
530 * non-zero: various error codes, in form of -Exxx
531 **/
532 int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
534 struct nvmefc_ls_req *pnvme_lsreq,
535 void (*gen_req_cmp)(struct lpfc_hba *phba,
536 struct lpfc_iocbq *cmdwqe,
537 struct lpfc_wcqe_complete *wcqe))
538 {
539 struct lpfc_dmabuf *bmp;
540 struct ulp_bde64 *bpl;
541 int ret;
542 uint16_t ntype, nstate;
543
544 if (!ndlp) {
545 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
546 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
547 "LS Req\n",
548 ndlp);
549 return -ENODEV;
550 }
551
552 ntype = ndlp->nlp_type;
553 nstate = ndlp->nlp_state;
554 if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
555 (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
556 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
557 "6088 NVMEx LS REQ: Fail DID x%06x not "
558 "ready for IO. Type x%x, State x%x\n",
559 ndlp->nlp_DID, ntype, nstate);
560 return -ENODEV;
561 }
562
563 if (!vport->phba->sli4_hba.nvmels_wq)
564 return -ENOMEM;
565
566 /*
	 * There are two DMA buffers in the request; actually there is one,
	 * and the second is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
574 * care that we got a response. All of the caring is going to happen
575 * in the nvme-fc layer.
576 */
577
578 bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
579 if (!bmp) {
580 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
581 "6044 NVMEx LS REQ: Could not alloc LS buf "
582 "for DID %x\n",
583 ndlp->nlp_DID);
584 return -ENOMEM;
585 }
586
587 bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
588 if (!bmp->virt) {
589 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
590 "6042 NVMEx LS REQ: Could not alloc mbuf "
591 "for DID %x\n",
592 ndlp->nlp_DID);
593 kfree(bmp);
594 return -ENOMEM;
595 }
596
597 INIT_LIST_HEAD(&bmp->list);
598
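	/* Build a two-entry BPL: the first BDE describes the LS request
	 * payload, the second (a 64-bit input BDE) the response buffer.
	 */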
599 bpl = (struct ulp_bde64 *)bmp->virt;
600 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
601 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
602 bpl->tus.f.bdeFlags = 0;
603 bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
604 bpl->tus.w = le32_to_cpu(bpl->tus.w);
605 bpl++;
606
607 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
608 bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
609 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
610 bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
611 bpl->tus.w = le32_to_cpu(bpl->tus.w);
612
613 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
614 "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
615 "rqstlen:%d rsplen:%d %pad %pad\n",
616 ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
617 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
618 &pnvme_lsreq->rspdma);
619
620 ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
621 pnvme_lsreq, gen_req_cmp, ndlp, 2,
622 pnvme_lsreq->timeout, 0);
623 if (ret != WQE_SUCCESS) {
624 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
625 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
626 "lsreq x%px Status %x DID %x\n",
627 pnvme_lsreq, ret, ndlp->nlp_DID);
628 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
629 kfree(bmp);
630 return -EIO;
631 }
632
633 return 0;
634 }
635
636 /**
637 * lpfc_nvme_ls_req - Issue an NVME Link Service request
638 * @pnvme_lport: Transport localport that LS is to be issued from.
639 * @pnvme_rport: Transport remoteport that LS is to be sent to.
640 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
641 *
642 * Driver registers this routine to handle any link service request
643 * from the nvme_fc transport to a remote nvme-aware port.
644 *
645 * Return value :
646 * 0 - Success
647 * non-zero: various error codes, in form of -Exxx
648 **/
649 static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
651 struct nvme_fc_remote_port *pnvme_rport,
652 struct nvmefc_ls_req *pnvme_lsreq)
653 {
654 struct lpfc_nvme_lport *lport;
655 struct lpfc_nvme_rport *rport;
656 struct lpfc_vport *vport;
657 int ret;
658
659 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
660 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
661 if (unlikely(!lport) || unlikely(!rport))
662 return -EINVAL;
663
664 vport = lport->vport;
665 if (vport->load_flag & FC_UNLOADING)
666 return -ENODEV;
667
668 atomic_inc(&lport->fc4NvmeLsRequests);
669
670 ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
671 lpfc_nvme_ls_req_cmp);
672 if (ret)
673 atomic_inc(&lport->xmt_ls_err);
674
675 return ret;
676 }
677
678 /**
679 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
680 * NVME LS request
681 * @vport: The local port that issued the LS
682 * @ndlp: The remote port the LS was sent to
683 * @pnvme_lsreq: Pointer to LS request structure from the transport
684 *
685 * The driver validates the ndlp, looks for the LS, and aborts the
686 * LS if found.
687 *
688 * Returns:
689 * 0 : if LS found and aborted
690 * non-zero: various error conditions in form -Exxx
691 **/
692 int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
694 struct nvmefc_ls_req *pnvme_lsreq)
695 {
696 struct lpfc_hba *phba = vport->phba;
697 struct lpfc_sli_ring *pring;
698 struct lpfc_iocbq *wqe, *next_wqe;
699 bool foundit = false;
700
701 if (!ndlp) {
702 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
703 "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
704 "x%06x, Failing LS Req\n",
705 ndlp, ndlp ? ndlp->nlp_DID : 0);
706 return -EINVAL;
707 }
708
709 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
710 "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
711 "x%px rqstlen:%d rsplen:%d %pad %pad\n",
712 pnvme_lsreq, pnvme_lsreq->rqstlen,
713 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
714 &pnvme_lsreq->rspdma);
715
716 /*
717 * Lock the ELS ring txcmplq and look for the wqe that matches
718 * this ELS. If found, issue an abort on the wqe.
719 */
720 pring = phba->sli4_hba.nvmels_wq->pring;
721 spin_lock_irq(&phba->hbalock);
722 spin_lock(&pring->ring_lock);
723 list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
724 if (wqe->context2 == pnvme_lsreq) {
725 wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
726 foundit = true;
727 break;
728 }
729 }
730 spin_unlock(&pring->ring_lock);
731
732 if (foundit)
733 lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
734 spin_unlock_irq(&phba->hbalock);
735
736 if (foundit)
737 return 0;
738
739 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
740 "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
741 pnvme_lsreq);
742 return -EINVAL;
743 }
744
745 static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
747 struct nvme_fc_remote_port *remoteport,
748 struct nvmefc_ls_rsp *ls_rsp)
749 {
750 struct lpfc_async_xchg_ctx *axchg =
751 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
752 struct lpfc_nvme_lport *lport;
753 int rc;
754
755 if (axchg->phba->pport->load_flag & FC_UNLOADING)
756 return -ENODEV;
757
758 lport = (struct lpfc_nvme_lport *)localport->private;
759
760 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);
761
762 if (rc) {
763 /*
764 * unless the failure is due to having already sent
765 * the response, an abort will be generated for the
766 * exchange if the rsp can't be sent.
767 */
768 if (rc != -EALREADY)
769 atomic_inc(&lport->xmt_ls_abort);
770 return rc;
771 }
772
773 return 0;
774 }
775
776 /**
777 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
778 * @pnvme_lport: Transport localport that LS is to be issued from.
779 * @pnvme_rport: Transport remoteport that LS is to be sent to.
780 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
781 *
 * Driver registers this routine to abort an NVME LS request that is
783 * in progress (from the transports perspective).
784 **/
785 static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
787 struct nvme_fc_remote_port *pnvme_rport,
788 struct nvmefc_ls_req *pnvme_lsreq)
789 {
790 struct lpfc_nvme_lport *lport;
791 struct lpfc_vport *vport;
792 struct lpfc_nodelist *ndlp;
793 int ret;
794
795 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
796 if (unlikely(!lport))
797 return;
798 vport = lport->vport;
799
800 if (vport->load_flag & FC_UNLOADING)
801 return;
802
803 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
804
805 ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
806 if (!ret)
807 atomic_inc(&lport->xmt_ls_abort);
808 }
809
810 /* Fix up the existing sgls for NVME IO. */
811 static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
813 struct lpfc_io_buf *lpfc_ncmd,
814 struct nvmefc_fcp_req *nCmd)
815 {
816 struct lpfc_hba *phba = vport->phba;
817 struct sli4_sge *sgl;
818 union lpfc_wqe128 *wqe;
819 uint32_t *wptr, *dptr;
820
821 /*
822 * Get a local pointer to the built-in wqe and correct
823 * the cmd size to match NVME's 96 bytes and fix
824 * the dma address.
825 */
826
827 wqe = &lpfc_ncmd->cur_iocbq.wqe;
828
829 /*
830 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
831 * match NVME. NVME sends 96 bytes. Also, use the
	 * nvme command's command and response dma addresses
833 * rather than the virtual memory to ease the restore
834 * operation.
835 */
836 sgl = lpfc_ncmd->dma_sgl;
837 sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
838 if (phba->cfg_nvme_embed_cmd) {
839 sgl->addr_hi = 0;
840 sgl->addr_lo = 0;
841
842 /* Word 0-2 - NVME CMND IU (embedded payload) */
843 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
844 wqe->generic.bde.tus.f.bdeSize = 56;
845 wqe->generic.bde.addrHigh = 0;
846 wqe->generic.bde.addrLow = 64; /* Word 16 */
847
848 /* Word 10 - dbde is 0, wqes is 1 in template */
849
850 /*
851 * Embed the payload in the last half of the WQE
852 * WQE words 16-30 get the NVME CMD IU payload
853 *
854 * WQE words 16-19 get payload Words 1-4
855 * WQE words 20-21 get payload Words 6-7
856 * WQE words 22-29 get payload Words 16-23
857 */
858 wptr = &wqe->words[16]; /* WQE ptr */
859 dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
860 dptr++; /* Skip Word 0 in payload */
861
862 *wptr++ = *dptr++; /* Word 1 */
863 *wptr++ = *dptr++; /* Word 2 */
864 *wptr++ = *dptr++; /* Word 3 */
865 *wptr++ = *dptr++; /* Word 4 */
866 dptr++; /* Skip Word 5 in payload */
867 *wptr++ = *dptr++; /* Word 6 */
868 *wptr++ = *dptr++; /* Word 7 */
869 dptr += 8; /* Skip Words 8-15 in payload */
870 *wptr++ = *dptr++; /* Word 16 */
871 *wptr++ = *dptr++; /* Word 17 */
872 *wptr++ = *dptr++; /* Word 18 */
873 *wptr++ = *dptr++; /* Word 19 */
874 *wptr++ = *dptr++; /* Word 20 */
875 *wptr++ = *dptr++; /* Word 21 */
876 *wptr++ = *dptr++; /* Word 22 */
877 *wptr = *dptr; /* Word 23 */
878 } else {
879 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
880 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
881
882 /* Word 0-2 - NVME CMND IU Inline BDE */
883 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
884 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
885 wqe->generic.bde.addrHigh = sgl->addr_hi;
886 wqe->generic.bde.addrLow = sgl->addr_lo;
887
888 /* Word 10 */
889 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
890 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
891 }
892
893 sgl++;
894
895 /* Setup the physical region for the FCP RSP */
896 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
897 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
898 sgl->word2 = le32_to_cpu(sgl->word2);
899 if (nCmd->sg_cnt)
900 bf_set(lpfc_sli4_sge_last, sgl, 0);
901 else
902 bf_set(lpfc_sli4_sge_last, sgl, 1);
903 sgl->word2 = cpu_to_le32(sgl->word2);
904 sgl->sge_len = cpu_to_le32(nCmd->rsplen);
905 }
906
907
908 /*
909 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
910 *
 * Driver registers this routine as the completion handler for an NVME
 * FCP IO. The routine validates the completed command, translates the
 * WCQE status into NVME response data, updates statistics, and releases
 * the lpfc_io_buf back to the driver's buffer pool.
 *
 * Return value :
 *   None
918 **/
919 static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
921 struct lpfc_wcqe_complete *wcqe)
922 {
923 struct lpfc_io_buf *lpfc_ncmd =
924 (struct lpfc_io_buf *)pwqeIn->context1;
925 struct lpfc_vport *vport = pwqeIn->vport;
926 struct nvmefc_fcp_req *nCmd;
927 struct nvme_fc_ersp_iu *ep;
928 struct nvme_fc_cmd_iu *cp;
929 struct lpfc_nodelist *ndlp;
930 struct lpfc_nvme_fcpreq_priv *freqpriv;
931 struct lpfc_nvme_lport *lport;
932 uint32_t code, status, idx;
933 uint16_t cid, sqhd, data;
934 uint32_t *ptr;
935 uint32_t lat;
936 bool call_done = false;
937 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
938 int cpu;
939 #endif
940 int offline = 0;
941
942 /* Sanity check on return of outstanding command */
943 if (!lpfc_ncmd) {
944 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
945 "6071 Null lpfc_ncmd pointer. No "
946 "release, skip completion\n");
947 return;
948 }
949
950 /* Guard against abort handler being called at same time */
951 spin_lock(&lpfc_ncmd->buf_lock);
952
953 if (!lpfc_ncmd->nvmeCmd) {
954 spin_unlock(&lpfc_ncmd->buf_lock);
955 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
956 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
957 "nvmeCmd x%px\n",
958 lpfc_ncmd, lpfc_ncmd->nvmeCmd);
959
960 /* Release the lpfc_ncmd regardless of the missing elements. */
961 lpfc_release_nvme_buf(phba, lpfc_ncmd);
962 return;
963 }
964 nCmd = lpfc_ncmd->nvmeCmd;
965 status = bf_get(lpfc_wcqe_c_status, wcqe);
966
967 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
968 phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
969
970 if (unlikely(status && vport->localport)) {
971 lport = (struct lpfc_nvme_lport *)vport->localport->private;
972 if (lport) {
973 if (bf_get(lpfc_wcqe_c_xb, wcqe))
974 atomic_inc(&lport->cmpl_fcp_xb);
975 atomic_inc(&lport->cmpl_fcp_err);
976 }
977 }
978
979 lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
980 lpfc_ncmd->cur_iocbq.sli4_xritag,
981 status, wcqe->parameter);
982 /*
983 * Catch race where our node has transitioned, but the
984 * transport is still transitioning.
985 */
986 ndlp = lpfc_ncmd->ndlp;
987 if (!ndlp) {
988 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
989 "6062 Ignoring NVME cmpl. No ndlp\n");
990 goto out_err;
991 }
992
993 code = bf_get(lpfc_wcqe_c_code, wcqe);
994 if (code == CQE_CODE_NVME_ERSP) {
995 /* For this type of CQE, we need to rebuild the rsp */
996 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
997
998 /*
999 * Get Command Id from cmd to plug into response. This
1000 * code is not needed in the next NVME Transport drop.
1001 */
1002 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
1003 cid = cp->sqe.common.command_id;
1004
1005 /*
1006 * RSN is in CQE word 2
1007 * SQHD is in CQE Word 3 bits 15:0
1008 * Cmd Specific info is in CQE Word 1
1009 * and in CQE Word 0 bits 15:0
1010 */
1011 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
1012
1013 /* Now lets build the NVME ERSP IU */
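		/* iu_len is expressed in 32-bit words; 8 words matches the
		 * LPFC_NVME_ERSP_LEN (32 bytes) reported to the transport.
		 */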
1014 ep->iu_len = cpu_to_be16(8);
1015 ep->rsn = wcqe->parameter;
1016 ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
1017 ep->rsvd12 = 0;
1018 ptr = (uint32_t *)&ep->cqe.result.u64;
1019 *ptr++ = wcqe->total_data_placed;
1020 data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
1021 *ptr = (uint32_t)data;
1022 ep->cqe.sq_head = sqhd;
1023 ep->cqe.sq_id = nCmd->sqid;
1024 ep->cqe.command_id = cid;
1025 ep->cqe.status = 0;
1026
1027 lpfc_ncmd->status = IOSTAT_SUCCESS;
1028 lpfc_ncmd->result = 0;
1029 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
1030 nCmd->transferred_length = nCmd->payload_length;
1031 } else {
1032 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
1033 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
1034
1035 /* For NVME, the only failure path that results in an
1036 * IO error is when the adapter rejects it. All other
1037 * conditions are a success case and resolved by the
1038 * transport.
1039 * IOSTAT_FCP_RSP_ERROR means:
1040 * 1. Length of data received doesn't match total
1041 * transfer length in WQE
1042 * 2. If the RSP payload does NOT match these cases:
1043 * a. RSP length 12/24 bytes and all zeros
1044 * b. NVME ERSP
1045 */
1046 switch (lpfc_ncmd->status) {
1047 case IOSTAT_SUCCESS:
1048 nCmd->transferred_length = wcqe->total_data_placed;
1049 nCmd->rcv_rsplen = 0;
1050 nCmd->status = 0;
1051 break;
1052 case IOSTAT_FCP_RSP_ERROR:
1053 nCmd->transferred_length = wcqe->total_data_placed;
1054 nCmd->rcv_rsplen = wcqe->parameter;
1055 nCmd->status = 0;
1056
1057 /* Check if this is really an ERSP */
1058 if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
1059 lpfc_ncmd->status = IOSTAT_SUCCESS;
1060 lpfc_ncmd->result = 0;
1061
1062 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1063 "6084 NVME Completion ERSP: "
1064 "xri %x placed x%x\n",
1065 lpfc_ncmd->cur_iocbq.sli4_xritag,
1066 wcqe->total_data_placed);
1067 break;
1068 }
1069 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1070 "6081 NVME Completion Protocol Error: "
1071 "xri %x status x%x result x%x "
1072 "placed x%x\n",
1073 lpfc_ncmd->cur_iocbq.sli4_xritag,
1074 lpfc_ncmd->status, lpfc_ncmd->result,
1075 wcqe->total_data_placed);
1076 break;
1077 case IOSTAT_LOCAL_REJECT:
1078 /* Let fall through to set command final state. */
1079 if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
1080 lpfc_printf_vlog(vport, KERN_INFO,
1081 LOG_NVME_IOERR,
1082 "6032 Delay Aborted cmd x%px "
1083 "nvme cmd x%px, xri x%x, "
1084 "xb %d\n",
1085 lpfc_ncmd, nCmd,
1086 lpfc_ncmd->cur_iocbq.sli4_xritag,
1087 bf_get(lpfc_wcqe_c_xb, wcqe));
1088 fallthrough;
1089 default:
1090 out_err:
1091 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1092 "6072 NVME Completion Error: xri %x "
1093 "status x%x result x%x [x%x] "
1094 "placed x%x\n",
1095 lpfc_ncmd->cur_iocbq.sli4_xritag,
1096 lpfc_ncmd->status, lpfc_ncmd->result,
1097 wcqe->parameter,
1098 wcqe->total_data_placed);
1099 nCmd->transferred_length = 0;
1100 nCmd->rcv_rsplen = 0;
1101 nCmd->status = NVME_SC_INTERNAL;
1102 offline = pci_channel_offline(vport->phba->pcidev);
1103 }
1104 }
1105
	/* pick up SLI4 exchange busy condition */
1107 if (bf_get(lpfc_wcqe_c_xb, wcqe) && !offline)
1108 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1109 else
1110 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
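	/* While XBUSY is set the adapter still owns the exchange, so the
	 * buffer is parked on the abort list by lpfc_release_nvme_buf and
	 * only recycled once the exchange is confirmed released.
	 */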
1111
1112 /* Update stats and complete the IO. There is
1113 * no need for dma unprep because the nvme_transport
1114 * owns the dma address.
1115 */
1116 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1117 if (lpfc_ncmd->ts_cmd_start) {
1118 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
1119 lpfc_ncmd->ts_data_io = ktime_get_ns();
1120 phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
1121 lpfc_io_ktime(phba, lpfc_ncmd);
1122 }
1123 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
1124 cpu = raw_smp_processor_id();
1125 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
1126 if (lpfc_ncmd->cpu != cpu)
1127 lpfc_printf_vlog(vport,
1128 KERN_INFO, LOG_NVME_IOERR,
1129 "6701 CPU Check cmpl: "
1130 "cpu %d expect %d\n",
1131 cpu, lpfc_ncmd->cpu);
1132 }
1133 #endif
1134
1135 /* NVME targets need completion held off until the abort exchange
1136 * completes unless the NVME Rport is getting unregistered.
1137 */
1138
1139 if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1140 freqpriv = nCmd->private;
1141 freqpriv->nvme_buf = NULL;
1142 lpfc_ncmd->nvmeCmd = NULL;
1143 call_done = true;
1144 }
1145 spin_unlock(&lpfc_ncmd->buf_lock);
1146
1147 /* Check if IO qualified for CMF */
1148 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
1149 nCmd->io_dir == NVMEFC_FCP_READ &&
1150 nCmd->payload_length) {
1151 /* Used when calculating average latency */
1152 lat = ktime_get_ns() - lpfc_ncmd->rx_cmd_start;
1153 lpfc_update_cmf_cmpl(phba, lat, nCmd->payload_length, NULL);
1154 }
1155
1156 if (call_done)
1157 nCmd->done(nCmd);
1158
1159 /* Call release with XB=1 to queue the IO into the abort list. */
1160 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1161 }
1162
1163
1164 /**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command buffer
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the WQE for the NVME IO described by @lpfc_ncmd
 * from the iread/iwrite/icmnd templates and fills in the WQE fields that
 * do not depend on the request's scatter/gather list.
 *
 * Return value :
 *   0 - Success
1178 **/
1179 static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1181 struct lpfc_io_buf *lpfc_ncmd,
1182 struct lpfc_nodelist *pnode,
1183 struct lpfc_fc4_ctrl_stat *cstat)
1184 {
1185 struct lpfc_hba *phba = vport->phba;
1186 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1187 struct nvme_common_command *sqe;
1188 struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
1189 union lpfc_wqe128 *wqe = &pwqeq->wqe;
1190 uint32_t req_len;
1191
1192 /*
1193 * There are three possibilities here - use scatter-gather segment, use
1194 * the single mapping, or neither.
1195 */
1196 if (nCmd->sg_cnt) {
1197 if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
1198 /* From the iwrite template, initialize words 7 - 11 */
1199 memcpy(&wqe->words[7],
1200 &lpfc_iwrite_cmd_template.words[7],
1201 sizeof(uint32_t) * 5);
1202
1203 /* Word 4 */
1204 wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
1205
1206 /* Word 5 */
1207 if ((phba->cfg_nvme_enable_fb) &&
1208 (pnode->nlp_flag & NLP_FIRSTBURST)) {
1209 req_len = lpfc_ncmd->nvmeCmd->payload_length;
1210 if (req_len < pnode->nvme_fb_size)
1211 wqe->fcp_iwrite.initial_xfer_len =
1212 req_len;
1213 else
1214 wqe->fcp_iwrite.initial_xfer_len =
1215 pnode->nvme_fb_size;
1216 } else {
1217 wqe->fcp_iwrite.initial_xfer_len = 0;
1218 }
1219 cstat->output_requests++;
1220 } else {
1221 /* From the iread template, initialize words 7 - 11 */
1222 memcpy(&wqe->words[7],
1223 &lpfc_iread_cmd_template.words[7],
1224 sizeof(uint32_t) * 5);
1225
1226 /* Word 4 */
1227 wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
1228
1229 /* Word 5 */
1230 wqe->fcp_iread.rsrvd5 = 0;
1231
1232 /* For a CMF Managed port, iod must be zero'ed */
1233 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
1234 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
1235 LPFC_WQE_IOD_NONE);
1236 cstat->input_requests++;
1237 }
1238 } else {
1239 /* From the icmnd template, initialize words 4 - 11 */
1240 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1241 sizeof(uint32_t) * 8);
1242 cstat->control_requests++;
1243 }
1244
1245 if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
1246 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
1247 sqe = &((struct nvme_fc_cmd_iu *)
1248 nCmd->cmdaddr)->sqe.common;
1249 if (sqe->opcode == nvme_admin_async_event)
1250 bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
1251 }
1252
1253 /*
1254 * Finish initializing those WQE fields that are independent
1255 * of the nvme_cmnd request_buffer
1256 */
1257
1258 /* Word 3 */
1259 bf_set(payload_offset_len, &wqe->fcp_icmd,
1260 (nCmd->rsplen + nCmd->cmdlen));
1261
1262 /* Word 6 */
1263 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1264 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1265 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1266
1267 /* Word 8 */
1268 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1269
1270 /* Word 9 */
1271 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1272
1273 /* Word 10 */
1274 bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);
1275
1276 /* Words 13 14 15 are for PBDE support */
1277
1278 pwqeq->vport = vport;
1279 return 0;
1280 }
1281
1282
1283 /**
 * lpfc_nvme_prep_io_dma - Prepare the DMA scatter/gather list for an
 * NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc nvme command buffer
 *
 * This routine adjusts the command and response SGEs, then builds the data
 * SGL for the IO from the transport's scatter/gather list, chaining extra
 * SGL pages and setting up the optional PBDE as needed.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
1295 **/
1296 static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1298 struct lpfc_io_buf *lpfc_ncmd)
1299 {
1300 struct lpfc_hba *phba = vport->phba;
1301 struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1302 union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1303 struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1304 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1305 struct scatterlist *data_sg;
1306 struct sli4_sge *first_data_sgl;
1307 struct ulp_bde64 *bde;
1308 dma_addr_t physaddr = 0;
1309 uint32_t num_bde = 0;
1310 uint32_t dma_len = 0;
1311 uint32_t dma_offset = 0;
1312 int nseg, i, j;
1313 bool lsp_just_set = false;
1314
1315 /* Fix up the command and response DMA stuff. */
1316 lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1317
1318 /*
1319 * There are three possibilities here - use scatter-gather segment, use
1320 * the single mapping, or neither.
1321 */
1322 if (nCmd->sg_cnt) {
1323 /*
1324 * Jump over the cmd and rsp SGEs. The fix routine
1325 * has already adjusted for this.
1326 */
1327 sgl += 2;
1328
1329 first_data_sgl = sgl;
1330 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1331 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1332 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1333 "6058 Too many sg segments from "
1334 "NVME Transport. Max %d, "
1335 "nvmeIO sg_cnt %d\n",
1336 phba->cfg_nvme_seg_cnt + 1,
1337 lpfc_ncmd->seg_cnt);
1338 lpfc_ncmd->seg_cnt = 0;
1339 return 1;
1340 }
1341
1342 /*
1343 * The driver established a maximum scatter-gather segment count
1344 * during probe that limits the number of sg elements in any
1345 * single nvme command. Just run through the seg_cnt and format
1346 * the sge's.
1347 */
1348 nseg = nCmd->sg_cnt;
1349 data_sg = nCmd->first_sgl;
1350
1351 /* for tracking the segment boundaries */
1352 j = 2;
1353 for (i = 0; i < nseg; i++) {
1354 if (data_sg == NULL) {
1355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1356 "6059 dptr err %d, nseg %d\n",
1357 i, nseg);
1358 lpfc_ncmd->seg_cnt = 0;
1359 return 1;
1360 }
1361
1362 sgl->word2 = 0;
1363 if ((num_bde + 1) == nseg) {
1364 bf_set(lpfc_sli4_sge_last, sgl, 1);
1365 bf_set(lpfc_sli4_sge_type, sgl,
1366 LPFC_SGE_TYPE_DATA);
1367 } else {
1368 bf_set(lpfc_sli4_sge_last, sgl, 0);
1369
1370 /* expand the segment */
1371 if (!lsp_just_set &&
1372 !((j + 1) % phba->border_sge_num) &&
1373 ((nseg - 1) != i)) {
1374 /* set LSP type */
1375 bf_set(lpfc_sli4_sge_type, sgl,
1376 LPFC_SGE_TYPE_LSP);
1377
1378 sgl_xtra = lpfc_get_sgl_per_hdwq(
1379 phba, lpfc_ncmd);
1380
1381 if (unlikely(!sgl_xtra)) {
1382 lpfc_ncmd->seg_cnt = 0;
1383 return 1;
1384 }
1385 sgl->addr_lo = cpu_to_le32(putPaddrLow(
1386 sgl_xtra->dma_phys_sgl));
1387 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
1388 sgl_xtra->dma_phys_sgl));
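					/* This LSP SGE chains to an extra
					 * SGL page from the per-hdwq pool;
					 * the SGE walk continues there below.
					 */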
1389
1390 } else {
1391 bf_set(lpfc_sli4_sge_type, sgl,
1392 LPFC_SGE_TYPE_DATA);
1393 }
1394 }
1395
1396 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
1397 LPFC_SGE_TYPE_LSP)) {
1398 if ((nseg - 1) == i)
1399 bf_set(lpfc_sli4_sge_last, sgl, 1);
1400
1401 physaddr = data_sg->dma_address;
1402 dma_len = data_sg->length;
1403 sgl->addr_lo = cpu_to_le32(
1404 putPaddrLow(physaddr));
1405 sgl->addr_hi = cpu_to_le32(
1406 putPaddrHigh(physaddr));
1407
1408 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1409 sgl->word2 = cpu_to_le32(sgl->word2);
1410 sgl->sge_len = cpu_to_le32(dma_len);
1411
1412 dma_offset += dma_len;
1413 data_sg = sg_next(data_sg);
1414
1415 sgl++;
1416
1417 lsp_just_set = false;
1418 } else {
1419 sgl->word2 = cpu_to_le32(sgl->word2);
1420
1421 sgl->sge_len = cpu_to_le32(
1422 phba->cfg_sg_dma_buf_size);
1423
1424 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1425 i = i - 1;
1426
1427 lsp_just_set = true;
1428 }
1429
1430 j++;
1431 }
1432 if (phba->cfg_enable_pbde) {
1433 /* Use PBDE support for first SGL only, offset == 0 */
1434 /* Words 13-15 */
1435 bde = (struct ulp_bde64 *)
1436 &wqe->words[13];
1437 bde->addrLow = first_data_sgl->addr_lo;
1438 bde->addrHigh = first_data_sgl->addr_hi;
1439 bde->tus.f.bdeSize =
1440 le32_to_cpu(first_data_sgl->sge_len);
1441 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1442 bde->tus.w = cpu_to_le32(bde->tus.w);
1443
1444 /* Word 11 */
1445 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
1446 } else {
1447 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1448 bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1449 }
1450
1451 } else {
1452 lpfc_ncmd->seg_cnt = 0;
1453
1454 /* For this clause to be valid, the payload_length
		 * and sg_cnt must be zero.
1456 */
1457 if (nCmd->payload_length != 0) {
1458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1459 "6063 NVME DMA Prep Err: sg_cnt %d "
1460 "payload_length x%x\n",
1461 nCmd->sg_cnt, nCmd->payload_length);
1462 return 1;
1463 }
1464 }
1465 return 0;
1466 }
1467
1468 /**
1469 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1470 * @pnvme_lport: Pointer to the driver's local port data
1471 * @pnvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
1472 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1473 * @pnvme_fcreq: IO request from nvme fc to driver.
1474 *
 * Driver registers this routine as its io request handler. This
1476 * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
1477 * data structure to the rport indicated in @lpfc_nvme_rport.
1478 *
1479 * Return value :
1480 * 0 - Success
 *   non-zero - various -Exxx error codes on failure
1482 **/
1483 static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1485 struct nvme_fc_remote_port *pnvme_rport,
1486 void *hw_queue_handle,
1487 struct nvmefc_fcp_req *pnvme_fcreq)
1488 {
1489 int ret = 0;
1490 int expedite = 0;
1491 int idx, cpu;
1492 struct lpfc_nvme_lport *lport;
1493 struct lpfc_fc4_ctrl_stat *cstat;
1494 struct lpfc_vport *vport;
1495 struct lpfc_hba *phba;
1496 struct lpfc_nodelist *ndlp;
1497 struct lpfc_io_buf *lpfc_ncmd;
1498 struct lpfc_nvme_rport *rport;
1499 struct lpfc_nvme_qhandle *lpfc_queue_info;
1500 struct lpfc_nvme_fcpreq_priv *freqpriv;
1501 struct nvme_common_command *sqe;
1502 uint64_t start = 0;
1503
1504 /* Validate pointers. LLDD fault handling with transport does
1505 * have timing races.
1506 */
1507 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1508 if (unlikely(!lport)) {
1509 ret = -EINVAL;
1510 goto out_fail;
1511 }
1512
1513 vport = lport->vport;
1514
1515 if (unlikely(!hw_queue_handle)) {
1516 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1517 "6117 Fail IO, NULL hw_queue_handle\n");
1518 atomic_inc(&lport->xmt_fcp_err);
1519 ret = -EBUSY;
1520 goto out_fail;
1521 }
1522
1523 phba = vport->phba;
1524
1525 if (unlikely(vport->load_flag & FC_UNLOADING)) {
1526 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1527 "6124 Fail IO, Driver unload\n");
1528 atomic_inc(&lport->xmt_fcp_err);
1529 ret = -ENODEV;
1530 goto out_fail;
1531 }
1532
1533 freqpriv = pnvme_fcreq->private;
1534 if (unlikely(!freqpriv)) {
1535 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1536 "6158 Fail IO, NULL request data\n");
1537 atomic_inc(&lport->xmt_fcp_err);
1538 ret = -EINVAL;
1539 goto out_fail;
1540 }
1541
1542 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1543 if (phba->ktime_on)
1544 start = ktime_get_ns();
1545 #endif
1546 rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1547 lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1548
1549 /*
1550 * Catch race where our node has transitioned, but the
1551 * transport is still transitioning.
1552 */
1553 ndlp = rport->ndlp;
1554 if (!ndlp) {
1555 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1556 "6053 Busy IO, ndlp not ready: rport x%px "
1557 "ndlp x%px, DID x%06x\n",
1558 rport, ndlp, pnvme_rport->port_id);
1559 atomic_inc(&lport->xmt_fcp_err);
1560 ret = -EBUSY;
1561 goto out_fail;
1562 }
1563
1564 /* The remote node has to be a mapped target or it's an error. */
1565 if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1566 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1567 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1568 "6036 Fail IO, DID x%06x not ready for "
1569 "IO. State x%x, Type x%x Flg x%x\n",
1570 pnvme_rport->port_id,
1571 ndlp->nlp_state, ndlp->nlp_type,
1572 ndlp->fc4_xpt_flags);
1573 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1574 ret = -EBUSY;
1575 goto out_fail;
1576
1577 }
1578
1579 /* Currently only NVME Keep alive commands should be expedited
1580 * if the driver runs out of a resource. These should only be
1581 * issued on the admin queue, qidx 0
1582 */
1583 if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1584 sqe = &((struct nvme_fc_cmd_iu *)
1585 pnvme_fcreq->cmdaddr)->sqe.common;
1586 if (sqe->opcode == nvme_admin_keep_alive)
1587 expedite = 1;
1588 }
1589
1590 /* Check if IO qualifies for CMF */
1591 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
1592 pnvme_fcreq->io_dir == NVMEFC_FCP_READ &&
1593 pnvme_fcreq->payload_length) {
1594 ret = lpfc_update_cmf_cmd(phba, pnvme_fcreq->payload_length);
1595 if (ret) {
1596 ret = -EBUSY;
1597 goto out_fail;
1598 }
1599 /* Get start time for IO latency */
1600 start = ktime_get_ns();
1601 }
1602
	/* The node is shared with FCP IO, so make sure the IO pending count does
1604 * not exceed the programmed depth.
1605 */
1606 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1607 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1608 !expedite) {
1609 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1610 "6174 Fail IO, ndlp qdepth exceeded: "
1611 "idx %d DID %x pend %d qdepth %d\n",
1612 lpfc_queue_info->index, ndlp->nlp_DID,
1613 atomic_read(&ndlp->cmd_pending),
1614 ndlp->cmd_qdepth);
1615 atomic_inc(&lport->xmt_fcp_qdepth);
1616 ret = -EBUSY;
1617 goto out_fail1;
1618 }
1619 }
1620
1621 /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
1622 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1623 idx = lpfc_queue_info->index;
1624 } else {
1625 cpu = raw_smp_processor_id();
1626 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1627 }
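	/* idx now names the hardware queue (WQ/CQ/EQ and its MSI-X vector)
	 * this IO will be posted to.
	 */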
1628
1629 lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1630 if (lpfc_ncmd == NULL) {
1631 atomic_inc(&lport->xmt_fcp_noxri);
1632 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1633 "6065 Fail IO, driver buffer pool is empty: "
1634 "idx %d DID %x\n",
1635 lpfc_queue_info->index, ndlp->nlp_DID);
1636 ret = -EBUSY;
1637 goto out_fail1;
1638 }
1639 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1640 if (start) {
1641 lpfc_ncmd->ts_cmd_start = start;
1642 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1643 } else {
1644 lpfc_ncmd->ts_cmd_start = 0;
1645 }
1646 #endif
1647 lpfc_ncmd->rx_cmd_start = start;
1648
1649 /*
1650 * Store the data needed by the driver to issue, abort, and complete
1651 * an IO.
1652 * Do not let the IO hang out forever. There is no midlayer issuing
1653 * an abort so inform the FW of the maximum IO pending time.
1654 */
1655 freqpriv->nvme_buf = lpfc_ncmd;
1656 lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1657 lpfc_ncmd->ndlp = ndlp;
1658 lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1659
1660 /*
1661 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
	 * This identifier was created in our hardware queue create callback
1663 * routine. The driver now is dependent on the IO queue steering from
1664 * the transport. We are trusting the upper NVME layers know which
1665 * index to use and that they have affinitized a CPU to this hardware
1666 * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1667 */
1668 lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1669 cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1670
1671 lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1672 ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1673 if (ret) {
1674 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1675 "6175 Fail IO, Prep DMA: "
1676 "idx %d DID %x\n",
1677 lpfc_queue_info->index, ndlp->nlp_DID);
1678 atomic_inc(&lport->xmt_fcp_err);
1679 ret = -ENOMEM;
1680 goto out_free_nvme_buf;
1681 }
1682
1683 lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1684 lpfc_ncmd->cur_iocbq.sli4_xritag,
1685 lpfc_queue_info->index, ndlp->nlp_DID);
1686
1687 ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1688 if (ret) {
1689 atomic_inc(&lport->xmt_fcp_wqerr);
1690 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1691 "6113 Fail IO, Could not issue WQE err %x "
1692 "sid: x%x did: x%x oxid: x%x\n",
1693 ret, vport->fc_myDID, ndlp->nlp_DID,
1694 lpfc_ncmd->cur_iocbq.sli4_xritag);
1695 goto out_free_nvme_buf;
1696 }
1697
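	/* With XRI rebalancing enabled, top up this hardware queue's private
	 * buffer pool if it has dropped below its low watermark.
	 */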
1698 if (phba->cfg_xri_rebalancing)
1699 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1700
1701 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1702 if (lpfc_ncmd->ts_cmd_start)
1703 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1704
1705 if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
1706 cpu = raw_smp_processor_id();
1707 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1708 lpfc_ncmd->cpu = cpu;
1709 if (idx != cpu)
1710 lpfc_printf_vlog(vport,
1711 KERN_INFO, LOG_NVME_IOERR,
1712 "6702 CPU Check cmd: "
1713 "cpu %d wq %d\n",
1714 lpfc_ncmd->cpu,
1715 lpfc_queue_info->index);
1716 }
1717 #endif
1718 return 0;
1719
1720 out_free_nvme_buf:
1721 if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1722 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1723 cstat->output_requests--;
1724 else
1725 cstat->input_requests--;
1726 } else
1727 cstat->control_requests--;
1728 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1729 out_fail1:
1730 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT,
1731 pnvme_fcreq->payload_length, NULL);
1732 out_fail:
1733 return ret;
1734 }
1735
1736 /**
1737 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1738 * @phba: Pointer to HBA context object
1739 * @cmdiocb: Pointer to command iocb object.
1740 * @abts_cmpl: Pointer to wcqe complete object.
1741 *
1742 * This is the callback function for any NVME FCP IO that was aborted.
1743 *
1744 * Return value:
1745 * None
1746 **/
1747 void
1748 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1749 struct lpfc_wcqe_complete *abts_cmpl)
1750 {
1751 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1752 "6145 ABORT_XRI_CN completing on rpi x%x "
1753 "original iotag x%x, abort cmd iotag x%x "
1754 "req_tag x%x, status x%x, hwstatus x%x\n",
1755 cmdiocb->iocb.un.acxri.abortContextTag,
1756 cmdiocb->iocb.un.acxri.abortIoTag,
1757 cmdiocb->iotag,
1758 bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1759 bf_get(lpfc_wcqe_c_status, abts_cmpl),
1760 bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1761 lpfc_sli_release_iocbq(phba, cmdiocb);
1762 }
1763
1764 /**
1765 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1766 * @pnvme_lport: Pointer to the driver's local port data
1767 * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
1768 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1769 * @pnvme_fcreq: IO request from nvme fc to driver.
1770 *
1771 * Driver registers this routine as its nvme request io abort handler. This
1772 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
1773 * data structure to the rport indicated in @pnvme_rport. This routine
1774 * is executed asynchronously - once the target is validated as "MAPPED" and
1775 * ready for IO, the driver issues the abort request and returns.
1776 *
1777 * Return value:
1778 * None
1779 **/
1780 static void
1781 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1782 struct nvme_fc_remote_port *pnvme_rport,
1783 void *hw_queue_handle,
1784 struct nvmefc_fcp_req *pnvme_fcreq)
1785 {
1786 struct lpfc_nvme_lport *lport;
1787 struct lpfc_vport *vport;
1788 struct lpfc_hba *phba;
1789 struct lpfc_io_buf *lpfc_nbuf;
1790 struct lpfc_iocbq *nvmereq_wqe;
1791 struct lpfc_nvme_fcpreq_priv *freqpriv;
1792 unsigned long flags;
1793 int ret_val;
1794
1795 /* Validate pointers. LLDD fault handling with transport does
1796 * have timing races.
1797 */
1798 lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1799 if (unlikely(!lport))
1800 return;
1801
1802 vport = lport->vport;
1803
1804 if (unlikely(!hw_queue_handle)) {
1805 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1806 "6129 Fail Abort, HW Queue Handle NULL.\n");
1807 return;
1808 }
1809
1810 phba = vport->phba;
1811 freqpriv = pnvme_fcreq->private;
1812
1813 if (unlikely(!freqpriv))
1814 return;
1815 if (vport->load_flag & FC_UNLOADING)
1816 return;
1817
1818 /* Announce entry to the abort handler. */
1819 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1820 "6002 Abort Request to rport DID x%06x "
1821 "for nvme_fc_req x%px\n",
1822 pnvme_rport->port_id,
1823 pnvme_fcreq);
1824
1825 /* If the hba is getting reset, this flag is set. It is
1826 * cleared when the reset is complete and rings reestablished.
1827 */
1828 spin_lock_irqsave(&phba->hbalock, flags);
1829 /* driver queued commands are in process of being flushed */
1830 if (phba->hba_flag & HBA_IOQ_FLUSH) {
1831 spin_unlock_irqrestore(&phba->hbalock, flags);
1832 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1833 "6139 Driver in reset cleanup - flushing "
1834 "NVME Req now. hba_flag x%x\n",
1835 phba->hba_flag);
1836 return;
1837 }
1838
1839 lpfc_nbuf = freqpriv->nvme_buf;
1840 if (!lpfc_nbuf) {
1841 spin_unlock_irqrestore(&phba->hbalock, flags);
1842 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1843 "6140 NVME IO req has no matching lpfc nvme "
1844 "io buffer. Skipping abort req.\n");
1845 return;
1846 } else if (!lpfc_nbuf->nvmeCmd) {
1847 spin_unlock_irqrestore(&phba->hbalock, flags);
1848 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1849 "6141 lpfc NVME IO req has no nvme_fcreq "
1850 "io buffer. Skipping abort req.\n");
1851 return;
1852 }
1853 nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1854
1855 /* Guard against IO completion being called at same time */
1856 spin_lock(&lpfc_nbuf->buf_lock);
1857
1858 /*
1859 * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1860 * state must match the nvme_fcreq passed by the nvme
1861 * transport. If they don't match, it is likely the driver
1862 * has already completed the NVME IO and the nvme transport
1863 * has not seen it yet.
1864 */
1865 if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1866 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1867 "6143 NVME req mismatch: "
1868 "lpfc_nbuf x%px nvmeCmd x%px, "
1869 "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
1870 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1871 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1872 goto out_unlock;
1873 }
1874
1875 /* Don't abort IOs no longer on the pending queue. */
1876 if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1877 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1878 "6142 NVME IO req x%px not queued - skipping "
1879 "abort req xri x%x\n",
1880 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1881 goto out_unlock;
1882 }
1883
1884 atomic_inc(&lport->xmt_fcp_abort);
1885 lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1886 nvmereq_wqe->sli4_xritag,
1887 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1888
1889 /* Outstanding abort is in progress */
1890 if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1891 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1892 "6144 Outstanding NVME I/O Abort Request "
1893 "still pending on nvme_fcreq x%px, "
1894 "lpfc_ncmd x%px xri x%x\n",
1895 pnvme_fcreq, lpfc_nbuf,
1896 nvmereq_wqe->sli4_xritag);
1897 goto out_unlock;
1898 }
1899
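	/* All checks passed - issue the abort for this outstanding IO.
	 * The abort WQE completes through lpfc_nvme_abort_fcreq_cmpl,
	 * which releases the abort iocb.
	 */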
1900 ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
1901 lpfc_nvme_abort_fcreq_cmpl);
1902
1903 spin_unlock(&lpfc_nbuf->buf_lock);
1904 spin_unlock_irqrestore(&phba->hbalock, flags);
1905
1906 /* Make sure HBA is alive */
1907 lpfc_issue_hb_tmo(phba);
1908
1909 if (ret_val != WQE_SUCCESS) {
1910 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1911 "6137 Failed abts issue_wqe with status x%x "
1912 "for nvme_fcreq x%px.\n",
1913 ret_val, pnvme_fcreq);
1914 return;
1915 }
1916
1917 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1918 "6138 Transport Abort NVME Request Issued for "
1919 "ox_id x%x\n",
1920 nvmereq_wqe->sli4_xritag);
1921 return;
1922
1923 out_unlock:
1924 spin_unlock(&lpfc_nbuf->buf_lock);
1925 spin_unlock_irqrestore(&phba->hbalock, flags);
1926 return;
1927 }
1928
1929 /* Declare and initialize an instance of the FC NVME template. */
1930 static struct nvme_fc_port_template lpfc_nvme_template = {
1931 /* initiator-based functions */
1932 .localport_delete = lpfc_nvme_localport_delete,
1933 .remoteport_delete = lpfc_nvme_remoteport_delete,
1934 .create_queue = lpfc_nvme_create_queue,
1935 .delete_queue = lpfc_nvme_delete_queue,
1936 .ls_req = lpfc_nvme_ls_req,
1937 .fcp_io = lpfc_nvme_fcp_io_submit,
1938 .ls_abort = lpfc_nvme_ls_abort,
1939 .fcp_abort = lpfc_nvme_fcp_abort,
1940 .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
1941
1942 .max_hw_queues = 1,
1943 .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1944 .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1945 .dma_boundary = 0xFFFFFFFF,
1946
1947 /* Sizes of additional private data for data structures.
1948 * The LS request private data size is not used at this time.
1949 */
1950 .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1951 .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1952 .lsrqst_priv_sz = 0,
1953 .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
1954 };
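/* Note: max_hw_queues and max_sgl_segments above are placeholders; they are
 * updated from the HBA configuration in lpfc_nvme_create_localport() before
 * the template is passed to nvme_fc_register_localport().
 */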
1955
1956 /*
1957 * lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
1958 *
1959 * This routine removes an nvme buffer from the head of the @idx hdwq
1960 * io_buf_list and returns it to the caller.
1961 *
1962 * Return codes:
1963 * NULL - Error
1964 * Pointer to lpfc_nvme_buf - Success
1965 **/
1966 static struct lpfc_io_buf *
1967 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1968 int idx, int expedite)
1969 {
1970 struct lpfc_io_buf *lpfc_ncmd;
1971 struct lpfc_sli4_hdw_queue *qp;
1972 struct sli4_sge *sgl;
1973 struct lpfc_iocbq *pwqeq;
1974 union lpfc_wqe128 *wqe;
1975
1976 lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
1977
1978 if (lpfc_ncmd) {
1979 pwqeq = &(lpfc_ncmd->cur_iocbq);
1980 wqe = &pwqeq->wqe;
1981
1982 /* Setup key fields in buffer that may have been changed
1983 * if other protocols used this buffer.
1984 */
1985 pwqeq->iocb_flag = LPFC_IO_NVME;
1986 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1987 lpfc_ncmd->start_time = jiffies;
1988 lpfc_ncmd->flags = 0;
1989
1990 /* Rsp SGE will be filled in when we rcv an IO
1991 * from the NVME Layer to be sent.
1992 * The cmd is going to be embedded so we need a SKIP SGE.
1993 */
1994 sgl = lpfc_ncmd->dma_sgl;
1995 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1996 bf_set(lpfc_sli4_sge_last, sgl, 0);
1997 sgl->word2 = cpu_to_le32(sgl->word2);
1998 /* Fill in word 3 / sgl_len during cmd submission */
1999
2000 /* Initialize 64 bytes only */
2001 memset(wqe, 0, sizeof(union lpfc_wqe));
2002
2003 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
2004 atomic_inc(&ndlp->cmd_pending);
2005 lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
2006 }
2007
2008 } else {
2009 qp = &phba->sli4_hba.hdwq[idx];
2010 qp->empty_io_bufs++;
2011 }
2012
2013 return lpfc_ncmd;
2014 }
2015
2016 /**
2017 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
2018 * @phba: The Hba for which this call is being executed.
2019 * @lpfc_ncmd: The nvme buffer which is being released.
2020 *
2021 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
2022 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
2023 * and cannot be reused for at least RA_TOV if the IO was
2024 * aborted.
2025 **/
2026 static void
2027 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2028 {
2029 struct lpfc_sli4_hdw_queue *qp;
2030 unsigned long iflag = 0;
2031
2032 if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2033 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2034
2035 lpfc_ncmd->ndlp = NULL;
2036 lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2037
2038 qp = lpfc_ncmd->hdwq;
2039 if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
2040 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2041 "6310 XB release deferred for "
2042 "ox_id x%x on reqtag x%x\n",
2043 lpfc_ncmd->cur_iocbq.sli4_xritag,
2044 lpfc_ncmd->cur_iocbq.iotag);
2045
2046 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2047 list_add_tail(&lpfc_ncmd->list,
2048 &qp->lpfc_abts_io_buf_list);
2049 qp->abts_nvme_io_bufs++;
2050 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2051 } else
2052 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2053 }
2054
2055 /**
2056 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2057 * @vport: the lpfc_vport instance requesting a localport.
2058 *
2059 * This routine is invoked to create an nvme localport instance to bind
2060 * to the nvme_fc_transport. It is called once during driver load
2061 * like lpfc_create_shost after all other services are initialized.
2062 * It requires a vport, vpi, and wwns at call time. Other localport
2063 * parameters are modified as the driver's FCID and the Fabric WWN
2064 * are established.
2065 *
2066 * Return codes
2067 * 0 - successful
2068 * -ENOMEM - no heap memory available
2069 * other values - from nvme registration upcall
2070 **/
2071 int
2072 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2073 {
2074 int ret = 0;
2075 struct lpfc_hba *phba = vport->phba;
2076 struct nvme_fc_port_info nfcp_info;
2077 struct nvme_fc_local_port *localport;
2078 struct lpfc_nvme_lport *lport;
2079
2080 /* Initialize this localport instance. The vport wwn usage ensures
2081 * that NPIV is accounted for.
2082 */
2083 memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2084 nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2085 nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2086 nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2087
2088 /* We need to tell the transport layer + 1 because it takes page
2089 * alignment into account. When space for the SGL is allocated, the driver
2090 * allocates + 3: one for cmd, one for rsp, and one for this alignment.
2091 */
2092 lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2093
2094 /* Advertise how many hw queues we support based on cfg_hdw_queue,
2095 * which will not exceed cpu count.
2096 */
2097 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2098
2099 if (!IS_ENABLED(CONFIG_NVME_FC))
2100 return ret;
2101
2102 /* The port info is built on the stack, but the registration
2103 * call allocates heap memory for the localport as well as the private area.
2104 */
2105
2106 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2107 &vport->phba->pcidev->dev, &localport);
2108 if (!ret) {
2109 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2110 "6005 Successfully registered local "
2111 "NVME port num %d, localP x%px, private "
2112 "x%px, sg_seg %d\n",
2113 localport->port_num, localport,
2114 localport->private,
2115 lpfc_nvme_template.max_sgl_segments);
2116
2117 /* Private is our lport size declared in the template. */
2118 lport = (struct lpfc_nvme_lport *)localport->private;
2119 vport->localport = localport;
2120 lport->vport = vport;
2121 vport->nvmei_support = 1;
2122
2123 atomic_set(&lport->xmt_fcp_noxri, 0);
2124 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2125 atomic_set(&lport->xmt_fcp_qdepth, 0);
2126 atomic_set(&lport->xmt_fcp_err, 0);
2127 atomic_set(&lport->xmt_fcp_wqerr, 0);
2128 atomic_set(&lport->xmt_fcp_abort, 0);
2129 atomic_set(&lport->xmt_ls_abort, 0);
2130 atomic_set(&lport->xmt_ls_err, 0);
2131 atomic_set(&lport->cmpl_fcp_xb, 0);
2132 atomic_set(&lport->cmpl_fcp_err, 0);
2133 atomic_set(&lport->cmpl_ls_xb, 0);
2134 atomic_set(&lport->cmpl_ls_err, 0);
2135
2136 atomic_set(&lport->fc4NvmeLsRequests, 0);
2137 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2138 }
2139
2140 return ret;
2141 }
2142
2143 #if (IS_ENABLED(CONFIG_NVME_FC))
2144 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2145 *
2146 * The driver has to wait for the host nvme transport to callback
2147 * indicating the localport has successfully unregistered all
2148 * resources. Since this is an uninterruptible wait, loop every ten
2149 * seconds and print a message indicating no progress.
2150 *
2151 * An uninterruptible wait is used because of the risk of transport-to-
2152 * driver state mismatch.
2153 */
2154 static void
2155 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2156 struct lpfc_nvme_lport *lport,
2157 struct completion *lport_unreg_cmp)
2158 {
2159 u32 wait_tmo;
2160 int ret, i, pending = 0;
2161 struct lpfc_sli_ring *pring;
2162 struct lpfc_hba *phba = vport->phba;
2163 struct lpfc_sli4_hdw_queue *qp;
2164 int abts_scsi, abts_nvme;
2165
2166 /* The host transport has to clean up and confirm, requiring an indefinite
2167 * wait. Print a message if a 10 second wait expires and renew the
2168 * wait. This is unexpected.
2169 */
2170 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2171 while (true) {
2172 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2173 if (unlikely(!ret)) {
2174 pending = 0;
2175 abts_scsi = 0;
2176 abts_nvme = 0;
2177 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2178 qp = &phba->sli4_hba.hdwq[i];
2179 if (!vport || !vport->localport ||
2180 !qp || !qp->io_wq)
2181 return;
2182
2183 pring = qp->io_wq->pring;
2184 if (!pring)
2185 continue;
2186 pending += pring->txcmplq_cnt;
2187 abts_scsi += qp->abts_scsi_io_bufs;
2188 abts_nvme += qp->abts_nvme_io_bufs;
2189 }
2190 if (!vport || !vport->localport ||
2191 vport->phba->hba_flag & HBA_PCI_ERR)
2192 return;
2193
2194 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2195 "6176 Lport x%px Localport x%px wait "
2196 "timed out. Pending %d [%d:%d]. "
2197 "Renewing.\n",
2198 lport, vport->localport, pending,
2199 abts_scsi, abts_nvme);
2200 continue;
2201 }
2202 break;
2203 }
2204 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2205 "6177 Lport x%px Localport x%px Complete Success\n",
2206 lport, vport->localport);
2207 }
2208 #endif
2209
2210 /**
2211 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2212 * @vport: pointer to a host virtual N_Port data structure
2213 *
2214 * This routine is invoked to destroy all lports bound to the phba.
2215 * The lport memory was allocated by the nvme fc transport and is
2216 * released there. This routine ensures all rports bound to the
2217 * lport have been disconnected.
2218 *
2219 **/
2220 void
2221 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2222 {
2223 #if (IS_ENABLED(CONFIG_NVME_FC))
2224 struct nvme_fc_local_port *localport;
2225 struct lpfc_nvme_lport *lport;
2226 int ret;
2227 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2228
2229 if (vport->nvmei_support == 0)
2230 return;
2231
2232 localport = vport->localport;
2233 if (!localport)
2234 return;
2235 lport = (struct lpfc_nvme_lport *)localport->private;
2236
2237 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2238 "6011 Destroying NVME localport x%px\n",
2239 localport);
2240
2241 /* lport's rport list is clear. Unregister
2242 * lport and release resources.
2243 */
2244 lport->lport_unreg_cmp = &lport_unreg_cmp;
2245 ret = nvme_fc_unregister_localport(localport);
2246
2247 /* Wait for completion. This either blocks
2248 * indefinitely or succeeds
2249 */
2250 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2251 vport->localport = NULL;
2252
2253 /* Regardless of the unregister upcall response, clear
2254 * nvmei_support. All rports are unregistered and the
2255 * driver will clean up.
2256 */
2257 vport->nvmei_support = 0;
2258 if (ret == 0) {
2259 lpfc_printf_vlog(vport,
2260 KERN_INFO, LOG_NVME_DISC,
2261 "6009 Unregistered lport Success\n");
2262 } else {
2263 lpfc_printf_vlog(vport,
2264 KERN_INFO, LOG_NVME_DISC,
2265 "6010 Unregistered lport "
2266 "Failed, status x%x\n",
2267 ret);
2268 }
2269 #endif
2270 }
2271
2272 void
2273 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2274 {
2275 #if (IS_ENABLED(CONFIG_NVME_FC))
2276 struct nvme_fc_local_port *localport;
2277 struct lpfc_nvme_lport *lport;
2278
2279 localport = vport->localport;
2280 if (!localport) {
2281 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2282 "6710 Update NVME fail. No localport\n");
2283 return;
2284 }
2285 lport = (struct lpfc_nvme_lport *)localport->private;
2286 if (!lport) {
2287 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2288 "6171 Update NVME fail. localP x%px, No lport\n",
2289 localport);
2290 return;
2291 }
2292 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2293 "6012 Update NVME lport x%px did x%x\n",
2294 localport, vport->fc_myDID);
2295
2296 localport->port_id = vport->fc_myDID;
2297 if (localport->port_id == 0)
2298 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2299 else
2300 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2301
2302 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2303 "6030 bound lport x%px to DID x%06x\n",
2304 lport, localport->port_id);
2305 #endif
2306 }
2307
2308 int
2309 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2310 {
2311 #if (IS_ENABLED(CONFIG_NVME_FC))
2312 int ret = 0;
2313 struct nvme_fc_local_port *localport;
2314 struct lpfc_nvme_lport *lport;
2315 struct lpfc_nvme_rport *rport;
2316 struct lpfc_nvme_rport *oldrport;
2317 struct nvme_fc_remote_port *remote_port;
2318 struct nvme_fc_port_info rpinfo;
2319 struct lpfc_nodelist *prev_ndlp = NULL;
2320 struct fc_rport *srport = ndlp->rport;
2321
2322 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2323 "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2324 ndlp->nlp_DID, ndlp->nlp_type);
2325
2326 localport = vport->localport;
2327 if (!localport)
2328 return 0;
2329
2330 lport = (struct lpfc_nvme_lport *)localport->private;
2331
2332 /* NVME rports are not preserved across devloss.
2333 * Just register this instance. Note, rpinfo->dev_loss_tmo is taken
2334 * from the fc_rport when one is bound, else the vport default. The
2335 * driver communicates port role capabilities consistent
2336 * with the PRLI response data.
2337 */
2338 memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2339 rpinfo.port_id = ndlp->nlp_DID;
2340 if (ndlp->nlp_type & NLP_NVME_TARGET)
2341 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2342 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2343 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2344
2345 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2346 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2347
2348 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2349 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2350 if (srport)
2351 rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2352 else
2353 rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2354
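	/* Check for an existing nrport binding on this ndlp.  If there is
	 * none, take a node reference for the registration; bail out if the
	 * reference cannot be obtained.
	 */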
2355 spin_lock_irq(&ndlp->lock);
2356 oldrport = lpfc_ndlp_get_nrport(ndlp);
2357 if (oldrport) {
2358 prev_ndlp = oldrport->ndlp;
2359 spin_unlock_irq(&ndlp->lock);
2360 } else {
2361 spin_unlock_irq(&ndlp->lock);
2362 if (!lpfc_nlp_get(ndlp)) {
2363 dev_warn(&vport->phba->pcidev->dev,
2364 "Warning - No node ref - exit register\n");
2365 return 0;
2366 }
2367 }
2368
2369 ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2370 if (!ret) {
2371 /* If the ndlp already has an nrport, this is just
2372 * a resume of the existing rport. Else this is a
2373 * new rport.
2374 */
2375 /* Guard against an unregister/reregister
2376 * race that leaves the WAIT flag set.
2377 */
2378 spin_lock_irq(&ndlp->lock);
2379 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2380 ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
2381 spin_unlock_irq(&ndlp->lock);
2382 rport = remote_port->private;
2383 if (oldrport) {
2384
2385 /* Sever the ndlp<->rport association
2386 * before dropping the ndlp ref from
2387 * register.
2388 */
2389 spin_lock_irq(&ndlp->lock);
2390 ndlp->nrport = NULL;
2391 ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT;
2392 spin_unlock_irq(&ndlp->lock);
2393 rport->ndlp = NULL;
2394 rport->remoteport = NULL;
2395
2396 /* Reference only removed if previous NDLP is no longer
2397 * active. It might be just a swap and removing the
2398 * reference would cause a premature cleanup.
2399 */
2400 if (prev_ndlp && prev_ndlp != ndlp) {
2401 if (!prev_ndlp->nrport)
2402 lpfc_nlp_put(prev_ndlp);
2403 }
2404 }
2405
2406 /* Cleanly bind the rport to the ndlp. */
2407 rport->remoteport = remote_port;
2408 rport->lport = lport;
2409 rport->ndlp = ndlp;
2410 spin_lock_irq(&ndlp->lock);
2411 ndlp->nrport = rport;
2412 spin_unlock_irq(&ndlp->lock);
2413 lpfc_printf_vlog(vport, KERN_INFO,
2414 LOG_NVME_DISC | LOG_NODE,
2415 "6022 Bind lport x%px to remoteport x%px "
2416 "rport x%px WWNN 0x%llx, "
2417 "Rport WWPN 0x%llx DID "
2418 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2419 lport, remote_port, rport,
2420 rpinfo.node_name, rpinfo.port_name,
2421 rpinfo.port_id, rpinfo.port_role,
2422 ndlp, prev_ndlp);
2423 } else {
2424 lpfc_printf_vlog(vport, KERN_ERR,
2425 LOG_TRACE_EVENT,
2426 "6031 RemotePort Registration failed "
2427 "err: %d, DID x%06x\n",
2428 ret, ndlp->nlp_DID);
2429 }
2430
2431 return ret;
2432 #else
2433 return 0;
2434 #endif
2435 }
2436
2437 /*
2438 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2439 *
2440 * If the ndlp represents an NVME target that we are logged into,
2441 * ping the NVME FC Transport layer to initiate a device rescan
2442 * on this remote NPort.
2443 */
2444 void
2445 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2446 {
2447 #if (IS_ENABLED(CONFIG_NVME_FC))
2448 struct lpfc_nvme_rport *nrport;
2449 struct nvme_fc_remote_port *remoteport = NULL;
2450
2451 spin_lock_irq(&ndlp->lock);
2452 nrport = lpfc_ndlp_get_nrport(ndlp);
2453 if (nrport)
2454 remoteport = nrport->remoteport;
2455 spin_unlock_irq(&ndlp->lock);
2456
2457 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2458 "6170 Rescan NPort DID x%06x type x%x "
2459 "state x%x nrport x%px remoteport x%px\n",
2460 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2461 nrport, remoteport);
2462
2463 if (!nrport || !remoteport)
2464 goto rescan_exit;
2465
2466 /* Only rescan if we are an NVME target in the MAPPED state */
2467 if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2468 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2469 nvme_fc_rescan_remoteport(remoteport);
2470
2471 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2472 "6172 NVME rescanned DID x%06x "
2473 "port_state x%x\n",
2474 ndlp->nlp_DID, remoteport->port_state);
2475 }
2476 return;
2477 rescan_exit:
2478 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2479 "6169 Skip NVME Rport Rescan, NVME remoteport "
2480 "unregistered\n");
2481 #endif
2482 }
2483
2484 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2485 *
2486 * There is no notion of Devloss or rport recovery from the current
2487 * nvme_transport perspective. Loss of an rport just means IO cannot
2488 * be sent and recovery is completely up to the initiator.
2489 * For now, the driver just unbinds the DID and port_role so that
2490 * no further IO can be issued. Changes are planned for later.
2491 *
2492 * Notes - the ndlp reference count is not decremented here
2493 * since there is no nvme_transport api for devloss. Node ref count
2494 * is only adjusted in driver unload.
2495 */
2496 void
2497 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2498 {
2499 #if (IS_ENABLED(CONFIG_NVME_FC))
2500 int ret;
2501 struct nvme_fc_local_port *localport;
2502 struct lpfc_nvme_lport *lport;
2503 struct lpfc_nvme_rport *rport;
2504 struct nvme_fc_remote_port *remoteport = NULL;
2505
2506 localport = vport->localport;
2507
2508 /* This is a fundamental error. The localport is always
2509 * available until driver unload. Just exit.
2510 */
2511 if (!localport)
2512 return;
2513
2514 lport = (struct lpfc_nvme_lport *)localport->private;
2515 if (!lport)
2516 goto input_err;
2517
2518 spin_lock_irq(&ndlp->lock);
2519 rport = lpfc_ndlp_get_nrport(ndlp);
2520 if (rport)
2521 remoteport = rport->remoteport;
2522 spin_unlock_irq(&ndlp->lock);
2523 if (!remoteport)
2524 goto input_err;
2525
2526 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2527 "6033 Unreg nvme remoteport x%px, portname x%llx, "
2528 "port_id x%06x, portstate x%x port type x%x "
2529 "refcnt %d\n",
2530 remoteport, remoteport->port_name,
2531 remoteport->port_id, remoteport->port_state,
2532 ndlp->nlp_type, kref_read(&ndlp->kref));
2533
2534 /* Sanity check ndlp type. Only call for NVME ports. Don't
2535 * clear any rport state until the transport calls back.
2536 */
2537
2538 if (ndlp->nlp_type & NLP_NVME_TARGET) {
2539 /* No concern about the role change on the nvme remoteport.
2540 * The transport will update it.
2541 */
2542 spin_lock_irq(&vport->phba->hbalock);
2543 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;
2544 spin_unlock_irq(&vport->phba->hbalock);
2545
2546 /* Don't let the host nvme transport keep sending keep-alives
2547 * on this remoteport. Vport is unloading, no recovery. The
2548 * return value is ignored. The upcall is a courtesy to the
2549 * transport.
2550 */
2551 if (vport->load_flag & FC_UNLOADING ||
2552 unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
2553 (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2554
2555 ret = nvme_fc_unregister_remoteport(remoteport);
2556
2557 /* The driver no longer knows if the nrport memory is valid
2558 * because the controller teardown process has begun and
2559 * is asynchronous. Break the binding in the ndlp. Also
2560 * remove the register ndlp reference to setup node release.
2561 */
2562 ndlp->nrport = NULL;
2563 lpfc_nlp_put(ndlp);
2564 if (ret != 0) {
2565 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2566 "6167 NVME unregister failed %d "
2567 "port_state x%x\n",
2568 ret, remoteport->port_state);
2569 }
2570 }
2571 return;
2572
2573 input_err:
2574 #endif
2575 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2576 "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2577 vport->localport, ndlp->rport, ndlp->nlp_DID);
2578 }
2579
2580 /**
2581 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
2582 * @phba: pointer to lpfc hba data structure.
2583 * @lpfc_ncmd: The nvme job structure for the request being aborted.
2584 *
2585 * This routine is invoked by the worker thread to process a SLI4 fast-path
2586 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
2587 * here.
2588 **/
2589 void
2590 lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
2591 struct lpfc_io_buf *lpfc_ncmd)
2592 {
2593 struct nvmefc_fcp_req *nvme_cmd = NULL;
2594
2595 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2596 "6533 %s nvme_cmd %p tag x%x abort complete and "
2597 "xri released\n", __func__,
2598 lpfc_ncmd->nvmeCmd,
2599 lpfc_ncmd->cur_iocbq.iotag);
2600
2601 /* Aborted NVME commands are required to not complete
2602 * before the abort exchange command fully completes.
2603 * Once completed, the IO buffer is returned via the put list.
2604 */
2605 if (lpfc_ncmd->nvmeCmd) {
2606 nvme_cmd = lpfc_ncmd->nvmeCmd;
2607 nvme_cmd->transferred_length = 0;
2608 nvme_cmd->rcv_rsplen = 0;
2609 nvme_cmd->status = NVME_SC_INTERNAL;
2610 nvme_cmd->done(nvme_cmd);
2611 lpfc_ncmd->nvmeCmd = NULL;
2612 }
2613 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2614 }
2615
2616 /**
2617 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2618 * @phba: pointer to lpfc hba data structure.
2619 * @axri: pointer to the fcp xri abort wcqe structure.
2620 * @lpfc_ncmd: The nvme job structure for the request being aborted.
2621 *
2622 * This routine is invoked by the worker thread to process a SLI4 fast-path
2623 * NVME aborted xri. Aborted NVME IO commands are completed to the transport
2624 * here.
2625 **/
2626 void
2627 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2628 struct sli4_wcqe_xri_aborted *axri,
2629 struct lpfc_io_buf *lpfc_ncmd)
2630 {
2631 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2632 struct nvmefc_fcp_req *nvme_cmd = NULL;
2633 struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2634
2635
2636 if (ndlp)
2637 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2638
2639 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2640 "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2641 "xri released\n",
2642 lpfc_ncmd->nvmeCmd, xri,
2643 lpfc_ncmd->cur_iocbq.iotag);
2644
2645 /* Aborted NVME commands are required to not complete
2646 * before the abort exchange command fully completes.
2647 * Once completed, the IO buffer is returned via the put list.
2648 */
2649 if (lpfc_ncmd->nvmeCmd) {
2650 nvme_cmd = lpfc_ncmd->nvmeCmd;
2651 nvme_cmd->done(nvme_cmd);
2652 lpfc_ncmd->nvmeCmd = NULL;
2653 }
2654 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2655 }
2656
2657 /**
2658 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2659 * @phba: Pointer to HBA context object.
2660 *
2661 * This function flushes all wqes in the nvme rings and frees all resources
2662 * in the txcmplq. This function does not issue abort wqes for the IO
2663 * commands in txcmplq, they will just be returned with
2664 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2665 * slot has been permanently disabled.
2666 **/
2667 void
2668 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2669 {
2670 struct lpfc_sli_ring *pring;
2671 u32 i, wait_cnt = 0;
2672
2673 if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2674 return;
2675
2676 /* Cycle through all IO rings and make sure all outstanding
2677 * WQEs have been removed from the txcmplqs.
2678 */
2679 for (i = 0; i < phba->cfg_hdw_queue; i++) {
2680 if (!phba->sli4_hba.hdwq[i].io_wq)
2681 continue;
2682 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2683
2684 if (!pring)
2685 continue;
2686
2687 /* Retrieve everything on the txcmplq */
2688 while (!list_empty(&pring->txcmplq)) {
2689 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2690 wait_cnt++;
2691
2692 /* The sleep is 10 ms. Every ten seconds,
2693 * dump a message. Something is wrong.
2694 */
2695 if ((wait_cnt % 1000) == 0) {
2696 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2697 "6178 NVME IO not empty, "
2698 "cnt %d\n", wait_cnt);
2699 }
2700 }
2701 }
2702
2703 /* Make sure HBA is alive */
2704 lpfc_issue_hb_tmo(phba);
2705
2706 }
2707
2708 void
2709 lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
2710 uint32_t stat, uint32_t param)
2711 {
2712 #if (IS_ENABLED(CONFIG_NVME_FC))
2713 struct lpfc_io_buf *lpfc_ncmd;
2714 struct nvmefc_fcp_req *nCmd;
2715 struct lpfc_wcqe_complete wcqe;
2716 struct lpfc_wcqe_complete *wcqep = &wcqe;
2717
2718 lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2719 if (!lpfc_ncmd) {
2720 lpfc_sli_release_iocbq(phba, pwqeIn);
2721 return;
2722 }
2723 /* For abort iocb just return, IO iocb will do a done call */
2724 if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2725 CMD_ABORT_XRI_CX) {
2726 lpfc_sli_release_iocbq(phba, pwqeIn);
2727 return;
2728 }
2729
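	/* Serialize with the IO completion path.  If the nvme command has
	 * already completed there is nothing to cancel; just release the
	 * IO buffer.
	 */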
2730 spin_lock(&lpfc_ncmd->buf_lock);
2731 nCmd = lpfc_ncmd->nvmeCmd;
2732 if (!nCmd) {
2733 spin_unlock(&lpfc_ncmd->buf_lock);
2734 lpfc_release_nvme_buf(phba, lpfc_ncmd);
2735 return;
2736 }
2737 spin_unlock(&lpfc_ncmd->buf_lock);
2738
2739 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2740 "6194 NVME Cancel xri %x\n",
2741 lpfc_ncmd->cur_iocbq.sli4_xritag);
2742
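	/* Build a local WCQE carrying the caller-supplied status and
	 * parameter, then invoke the normal WQE completion handler with it.
	 */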
2743 wcqep->word0 = 0;
2744 bf_set(lpfc_wcqe_c_status, wcqep, stat);
2745 wcqep->parameter = param;
2746 wcqep->word3 = 0; /* xb is 0 */
2747
2748 /* Call release with XB=1 to queue the IO into the abort list. */
2749 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
2750 bf_set(lpfc_wcqe_c_xb, wcqep, 1);
2751
2752 (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
2753 #endif
2754 }
2755