1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include "lpfc_version.h"
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52 #include "lpfc_debugfs.h"
53
54 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
55 struct lpfc_async_xchg_ctx *,
56 dma_addr_t rspbuf,
57 uint16_t rspsize);
58 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
59 struct lpfc_async_xchg_ctx *);
60 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
61 struct lpfc_async_xchg_ctx *,
62 uint32_t, uint16_t);
63 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
64 struct lpfc_async_xchg_ctx *,
65 uint32_t, uint16_t);
66 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
67 struct lpfc_async_xchg_ctx *);
68 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
69
70 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
71
72 static union lpfc_wqe128 lpfc_tsend_cmd_template;
73 static union lpfc_wqe128 lpfc_treceive_cmd_template;
74 static union lpfc_wqe128 lpfc_trsp_cmd_template;
75
76 /* Setup WQE templates for NVME IOs */
77 void
78 lpfc_nvmet_cmd_template(void)
79 {
80 union lpfc_wqe128 *wqe;
81
82 /* TSEND template */
83 wqe = &lpfc_tsend_cmd_template;
84 memset(wqe, 0, sizeof(union lpfc_wqe128));
85
86 /* Word 0, 1, 2 - BDE is variable */
87
88 /* Word 3 - payload_offset_len is zero */
89
90 /* Word 4 - relative_offset is variable */
91
92 /* Word 5 - is zero */
93
94 /* Word 6 - ctxt_tag, xri_tag is variable */
95
96 /* Word 7 - wqe_ar is variable */
97 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
98 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
99 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
100 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
101 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
102
103 /* Word 8 - abort_tag is variable */
104
105 /* Word 9 - reqtag, rcvoxid is variable */
106
107 /* Word 10 - wqes, xc is variable */
108 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
109 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
110 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
111 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
112 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
113 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
114
115 /* Word 11 - sup, irsp, irsplen is variable */
116 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
117 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
119 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
120 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
121 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
122
123 /* Word 12 - fcp_data_len is variable */
124
125 /* Word 13, 14, 15 - PBDE is zero */
126
127 /* TRECEIVE template */
128 wqe = &lpfc_treceive_cmd_template;
129 memset(wqe, 0, sizeof(union lpfc_wqe128));
130
131 /* Word 0, 1, 2 - BDE is variable */
132
133 /* Word 3 */
134 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
135
136 /* Word 4 - relative_offset is variable */
137
138 /* Word 5 - is zero */
139
140 /* Word 6 - ctxt_tag, xri_tag is variable */
141
142 /* Word 7 */
143 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
144 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
145 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
146 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
147 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
148
149 /* Word 8 - abort_tag is variable */
150
151 /* Word 9 - reqtag, rcvoxid is variable */
152
153 /* Word 10 - xc is variable */
154 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
155 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
156 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
157 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
158 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
159 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
160
161 /* Word 11 - pbde is variable */
162 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
163 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
164 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
165 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
166 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
167 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
168
169 /* Word 12 - fcp_data_len is variable */
170
171 /* Word 13, 14, 15 - PBDE is variable */
172
173 /* TRSP template */
174 wqe = &lpfc_trsp_cmd_template;
175 memset(wqe, 0, sizeof(union lpfc_wqe128));
176
177 /* Word 0, 1, 2 - BDE is variable */
178
179 /* Word 3 - response_len is variable */
180
181 /* Word 4, 5 - is zero */
182
183 /* Word 6 - ctxt_tag, xri_tag is variable */
184
185 /* Word 7 */
186 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
187 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
188 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
189 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
190 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
191
192 /* Word 8 - abort_tag is variable */
193
194 /* Word 9 - reqtag is variable */
195
196 /* Word 10 wqes, xc is variable */
197 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
198 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
199 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
200 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
201 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
202 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
203
204 /* Word 11 irsp, irsplen is variable */
205 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
206 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
207 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
208 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
209 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
210 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
211
212 /* Word 12, 13, 14, 15 - is zero */
213 }
214
215 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
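/* Find the active exchange context whose SGL XRI matches @xri.
 * Walks sli4_hba.t_active_ctx_list under t_active_list_lock and
 * returns the matching context, or NULL if none is found.
 */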
216 static struct lpfc_async_xchg_ctx *
217 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
218 {
219 struct lpfc_async_xchg_ctx *ctxp;
220 unsigned long iflag;
221 bool found = false;
222
223 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
226 continue;
227
228 found = true;
229 break;
230 }
231 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
232 if (found)
233 return ctxp;
234
235 return NULL;
236 }
237
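/* Find the active exchange context matching the exchange OX_ID and
 * source ID of a received frame. Returns the context, or NULL if no
 * active exchange matches.
 */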
238 static struct lpfc_async_xchg_ctx *
239 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
240 {
241 struct lpfc_async_xchg_ctx *ctxp;
242 unsigned long iflag;
243 bool found = false;
244
245 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247 if (ctxp->oxid != oxid || ctxp->sid != sid)
248 continue;
249
250 found = true;
251 break;
252 }
253 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
254 if (found)
255 return ctxp;
256
257 return NULL;
258 }
259 #endif
260
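/* Defer release of an exchange context: mark it LPFC_NVME_CTX_RLS and
 * move it from the active context list to the ABTS context list so it
 * is freed later by the abort/XRI-aborted path. Caller must hold
 * ctxp->ctxlock.
 */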
261 static void
262 lpfc_nvmet_defer_release(struct lpfc_hba *phba,
263 struct lpfc_async_xchg_ctx *ctxp)
264 {
265 lockdep_assert_held(&ctxp->ctxlock);
266
267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
268 "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
269 ctxp->oxid, ctxp->flag);
270
271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
272 return;
273
274 ctxp->flag |= LPFC_NVME_CTX_RLS;
275 spin_lock(&phba->sli4_hba.t_active_list_lock);
276 list_del(&ctxp->list);
277 spin_unlock(&phba->sli4_hba.t_active_list_lock);
278 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
280 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
281 }
282
283 /**
284 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
285 * transmission of an NVME LS response.
286 * @phba: Pointer to HBA context object.
287 * @cmdwqe: Pointer to driver command WQE object.
288 * @wcqe: Pointer to driver response CQE object.
289 *
290 * The function is called from SLI ring event handler with no
291 * lock held. The function frees memory resources used for the command
292 * used to send the NVME LS RSP.
293 **/
294 void
295 __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
296 struct lpfc_wcqe_complete *wcqe)
297 {
298 struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
299 struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
300 uint32_t status, result;
301
302 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
303 result = wcqe->parameter;
304
305 if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
307 "6410 NVMEx LS cmpl state mismatch IO x%x: "
308 "%d %d\n",
309 axchg->oxid, axchg->state, axchg->entry_cnt);
310 }
311
312 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
313 axchg->oxid, status, result);
314
315 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
316 "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
317 status, result, axchg->oxid);
318
319 lpfc_nlp_put(cmdwqe->context1);
320 cmdwqe->context2 = NULL;
321 cmdwqe->context3 = NULL;
322 lpfc_sli_release_iocbq(phba, cmdwqe);
323 ls_rsp->done(ls_rsp);
324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
325 "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
326 status, axchg->oxid);
327 kfree(axchg);
328 }
329
330 /**
331 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
332 * @phba: Pointer to HBA context object.
333 * @cmdwqe: Pointer to driver command WQE object.
334 * @wcqe: Pointer to driver response CQE object.
335 *
336 * The function is called from SLI ring event handler with no
337 * lock held. This function is the completion handler for NVME LS commands.
338 * The function updates any states and statistics, then calls the
339 * generic completion handler to free resources.
340 **/
341 static void
342 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
343 struct lpfc_wcqe_complete *wcqe)
344 {
345 struct lpfc_nvmet_tgtport *tgtp;
346 uint32_t status, result;
347
348 if (!phba->targetport)
349 goto finish;
350
351 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
352 result = wcqe->parameter;
353
354 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
355 if (tgtp) {
356 if (status) {
357 atomic_inc(&tgtp->xmt_ls_rsp_error);
358 if (result == IOERR_ABORT_REQUESTED)
359 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
360 if (bf_get(lpfc_wcqe_c_xb, wcqe))
361 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
362 } else {
363 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
364 }
365 }
366
367 finish:
368 __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
369 }
370
371 /**
372 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
373 * @phba: HBA buffer is associated with
374 * @ctx_buf: ctx buffer context to clean up and repost
376 *
377 * Description: Frees the given DMA buffer by reposting it to its
378 * associated RQ so it can be reused.
379 *
380 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
381 *
382 * Returns: None
383 **/
384 void
385 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
386 {
387 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
388 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
389 struct lpfc_nvmet_tgtport *tgtp;
390 struct fc_frame_header *fc_hdr;
391 struct rqb_dmabuf *nvmebuf;
392 struct lpfc_nvmet_ctx_info *infop;
393 uint32_t size, oxid, sid;
394 int cpu;
395 unsigned long iflag;
396
397 if (ctxp->state == LPFC_NVME_STE_FREE) {
398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
399 "6411 NVMET free, already free IO x%x: %d %d\n",
400 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
401 }
402
403 if (ctxp->rqb_buffer) {
404 spin_lock_irqsave(&ctxp->ctxlock, iflag);
405 nvmebuf = ctxp->rqb_buffer;
406 /* check if freed in another path whilst acquiring lock */
407 if (nvmebuf) {
408 ctxp->rqb_buffer = NULL;
409 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
410 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
411 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
412 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
413 nvmebuf);
414 } else {
415 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
416 /* repost */
417 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
418 }
419 } else {
420 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
421 }
422 }
423 ctxp->state = LPFC_NVME_STE_FREE;
424
425 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
426 if (phba->sli4_hba.nvmet_io_wait_cnt) {
427 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
428 nvmebuf, struct rqb_dmabuf,
429 hbuf.list);
430 phba->sli4_hba.nvmet_io_wait_cnt--;
431 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
432 iflag);
433
434 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
435 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
436 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
437 size = nvmebuf->bytes_recv;
438 sid = sli4_sid_from_fc_hdr(fc_hdr);
439
440 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
441 ctxp->wqeq = NULL;
442 ctxp->offset = 0;
443 ctxp->phba = phba;
444 ctxp->size = size;
445 ctxp->oxid = oxid;
446 ctxp->sid = sid;
447 ctxp->state = LPFC_NVME_STE_RCV;
448 ctxp->entry_cnt = 1;
449 ctxp->flag = 0;
450 ctxp->ctxbuf = ctx_buf;
451 ctxp->rqb_buffer = (void *)nvmebuf;
452 spin_lock_init(&ctxp->ctxlock);
453
454 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
455 /* NOTE: isr time stamp is stale when context is re-assigned */
456 if (ctxp->ts_isr_cmd) {
457 ctxp->ts_cmd_nvme = 0;
458 ctxp->ts_nvme_data = 0;
459 ctxp->ts_data_wqput = 0;
460 ctxp->ts_isr_data = 0;
461 ctxp->ts_data_nvme = 0;
462 ctxp->ts_nvme_status = 0;
463 ctxp->ts_status_wqput = 0;
464 ctxp->ts_isr_status = 0;
465 ctxp->ts_status_nvme = 0;
466 }
467 #endif
468 atomic_inc(&tgtp->rcv_fcp_cmd_in);
469
470 /* Indicate that a replacement buffer has been posted */
471 spin_lock_irqsave(&ctxp->ctxlock, iflag);
472 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
473 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
474
475 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
476 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
478 "6181 Unable to queue deferred work "
479 "for oxid x%x. "
480 "FCP Drop IO [x%x x%x x%x]\n",
481 ctxp->oxid,
482 atomic_read(&tgtp->rcv_fcp_cmd_in),
483 atomic_read(&tgtp->rcv_fcp_cmd_out),
484 atomic_read(&tgtp->xmt_fcp_release));
485
486 spin_lock_irqsave(&ctxp->ctxlock, iflag);
487 lpfc_nvmet_defer_release(phba, ctxp);
488 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
489 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
490 }
491 return;
492 }
493 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
494
495 /*
496 * Use the CPU context list, from the MRQ the IO was received on
497 * (ctxp->idx), to save context structure.
498 */
499 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
500 list_del_init(&ctxp->list);
501 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
502 cpu = raw_smp_processor_id();
503 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
504 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
505 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
506 infop->nvmet_ctx_list_cnt++;
507 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
508 #endif
509 }
510
511 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
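/* Accumulate per-IO latency statistics from the timestamps captured at
 * each stage of a target FCP command (segments 1-10, described below).
 * Samples with missing or out-of-order timestamps are discarded.
 */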
512 static void
513 lpfc_nvmet_ktime(struct lpfc_hba *phba,
514 struct lpfc_async_xchg_ctx *ctxp)
515 {
516 uint64_t seg1, seg2, seg3, seg4, seg5;
517 uint64_t seg6, seg7, seg8, seg9, seg10;
518 uint64_t segsum;
519
520 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
521 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
522 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
523 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
524 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
525 return;
526
527 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
528 return;
529 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
530 return;
531 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
532 return;
533 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
534 return;
535 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
536 return;
537 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
538 return;
539 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
540 return;
541 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
542 return;
543 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
544 return;
545 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
546 return;
547 /*
548 * Segment 1 - Time from FCP command received by MSI-X ISR
549 * to FCP command is passed to NVME Layer.
550 * Segment 2 - Time from FCP command payload handed
551 * off to NVME Layer to Driver receives a Command op
552 * from NVME Layer.
553 * Segment 3 - Time from Driver receives a Command op
554 * from NVME Layer to Command is put on WQ.
555 * Segment 4 - Time from Driver WQ put is done
556 * to MSI-X ISR for Command cmpl.
557 * Segment 5 - Time from MSI-X ISR for Command cmpl to
558 * Command cmpl is passed to NVME Layer.
559 * Segment 6 - Time from Command cmpl is passed to NVME
560 * Layer to Driver receives a RSP op from NVME Layer.
561 * Segment 7 - Time from Driver receives a RSP op from
562 * NVME Layer to WQ put is done on TRSP FCP Status.
563 * Segment 8 - Time from Driver WQ put is done on TRSP
564 * FCP Status to MSI-X ISR for TRSP cmpl.
565 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
566 * TRSP cmpl is passed to NVME Layer.
567 * Segment 10 - Time from FCP command received by
568 * MSI-X ISR to command is completed on wire.
569 * (Segments 1 thru 8) for READDATA / WRITEDATA
570 * (Segments 1 thru 4) for READDATA_RSP
571 */
572 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
573 segsum = seg1;
574
575 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
576 if (segsum > seg2)
577 return;
578 seg2 -= segsum;
579 segsum += seg2;
580
581 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
582 if (segsum > seg3)
583 return;
584 seg3 -= segsum;
585 segsum += seg3;
586
587 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
588 if (segsum > seg4)
589 return;
590 seg4 -= segsum;
591 segsum += seg4;
592
593 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
594 if (segsum > seg5)
595 return;
596 seg5 -= segsum;
597 segsum += seg5;
598
599
600 /* For auto rsp commands seg6 thru seg10 will be 0 */
601 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
602 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
603 if (segsum > seg6)
604 return;
605 seg6 -= segsum;
606 segsum += seg6;
607
608 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
609 if (segsum > seg7)
610 return;
611 seg7 -= segsum;
612 segsum += seg7;
613
614 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
615 if (segsum > seg8)
616 return;
617 seg8 -= segsum;
618 segsum += seg8;
619
620 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
621 if (segsum > seg9)
622 return;
623 seg9 -= segsum;
624 segsum += seg9;
625
626 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
627 return;
628 seg10 = (ctxp->ts_isr_status -
629 ctxp->ts_isr_cmd);
630 } else {
631 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
632 return;
633 seg6 = 0;
634 seg7 = 0;
635 seg8 = 0;
636 seg9 = 0;
637 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
638 }
639
640 phba->ktime_seg1_total += seg1;
641 if (seg1 < phba->ktime_seg1_min)
642 phba->ktime_seg1_min = seg1;
643 else if (seg1 > phba->ktime_seg1_max)
644 phba->ktime_seg1_max = seg1;
645
646 phba->ktime_seg2_total += seg2;
647 if (seg2 < phba->ktime_seg2_min)
648 phba->ktime_seg2_min = seg2;
649 else if (seg2 > phba->ktime_seg2_max)
650 phba->ktime_seg2_max = seg2;
651
652 phba->ktime_seg3_total += seg3;
653 if (seg3 < phba->ktime_seg3_min)
654 phba->ktime_seg3_min = seg3;
655 else if (seg3 > phba->ktime_seg3_max)
656 phba->ktime_seg3_max = seg3;
657
658 phba->ktime_seg4_total += seg4;
659 if (seg4 < phba->ktime_seg4_min)
660 phba->ktime_seg4_min = seg4;
661 else if (seg4 > phba->ktime_seg4_max)
662 phba->ktime_seg4_max = seg4;
663
664 phba->ktime_seg5_total += seg5;
665 if (seg5 < phba->ktime_seg5_min)
666 phba->ktime_seg5_min = seg5;
667 else if (seg5 > phba->ktime_seg5_max)
668 phba->ktime_seg5_max = seg5;
669
670 phba->ktime_data_samples++;
671 if (!seg6)
672 goto out;
673
674 phba->ktime_seg6_total += seg6;
675 if (seg6 < phba->ktime_seg6_min)
676 phba->ktime_seg6_min = seg6;
677 else if (seg6 > phba->ktime_seg6_max)
678 phba->ktime_seg6_max = seg6;
679
680 phba->ktime_seg7_total += seg7;
681 if (seg7 < phba->ktime_seg7_min)
682 phba->ktime_seg7_min = seg7;
683 else if (seg7 > phba->ktime_seg7_max)
684 phba->ktime_seg7_max = seg7;
685
686 phba->ktime_seg8_total += seg8;
687 if (seg8 < phba->ktime_seg8_min)
688 phba->ktime_seg8_min = seg8;
689 else if (seg8 > phba->ktime_seg8_max)
690 phba->ktime_seg8_max = seg8;
691
692 phba->ktime_seg9_total += seg9;
693 if (seg9 < phba->ktime_seg9_min)
694 phba->ktime_seg9_min = seg9;
695 else if (seg9 > phba->ktime_seg9_max)
696 phba->ktime_seg9_max = seg9;
697 out:
698 phba->ktime_seg10_total += seg10;
699 if (seg10 < phba->ktime_seg10_min)
700 phba->ktime_seg10_min = seg10;
701 else if (seg10 > phba->ktime_seg10_max)
702 phba->ktime_seg10_max = seg10;
703 phba->ktime_status_samples++;
704 }
705 #endif
706
707 /**
708 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
709 * @phba: Pointer to HBA context object.
710 * @cmdwqe: Pointer to driver command WQE object.
711 * @wcqe: Pointer to driver response CQE object.
712 *
713 * The function is called from SLI ring event handler with no
714 * lock held. This function is the completion handler for NVME FCP commands.
715 * The function frees memory resources used for the NVME commands.
716 **/
717 static void
718 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
719 struct lpfc_wcqe_complete *wcqe)
720 {
721 struct lpfc_nvmet_tgtport *tgtp;
722 struct nvmefc_tgt_fcp_req *rsp;
723 struct lpfc_async_xchg_ctx *ctxp;
724 uint32_t status, result, op, start_clean, logerr;
725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726 int id;
727 #endif
728
729 ctxp = cmdwqe->context2;
730 ctxp->flag &= ~LPFC_NVME_IO_INP;
731
732 rsp = &ctxp->hdlrctx.fcp_req;
733 op = rsp->op;
734
735 status = bf_get(lpfc_wcqe_c_status, wcqe);
736 result = wcqe->parameter;
737
738 if (phba->targetport)
739 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
740 else
741 tgtp = NULL;
742
743 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
744 ctxp->oxid, op, status);
745
746 if (status) {
747 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
748 rsp->transferred_length = 0;
749 if (tgtp) {
750 atomic_inc(&tgtp->xmt_fcp_rsp_error);
751 if (result == IOERR_ABORT_REQUESTED)
752 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
753 }
754
755 logerr = LOG_NVME_IOERR;
756
757 /* pick up SLI4 exchange busy condition */
758 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
759 ctxp->flag |= LPFC_NVME_XBUSY;
760 logerr |= LOG_NVME_ABTS;
761 if (tgtp)
762 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
763
764 } else {
765 ctxp->flag &= ~LPFC_NVME_XBUSY;
766 }
767
768 lpfc_printf_log(phba, KERN_INFO, logerr,
769 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
770 "XBUSY:x%x\n",
771 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
772 status, result, ctxp->flag);
773
774 } else {
775 rsp->fcp_error = NVME_SC_SUCCESS;
776 if (op == NVMET_FCOP_RSP)
777 rsp->transferred_length = rsp->rsplen;
778 else
779 rsp->transferred_length = rsp->transfer_length;
780 if (tgtp)
781 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
782 }
783
784 if ((op == NVMET_FCOP_READDATA_RSP) ||
785 (op == NVMET_FCOP_RSP)) {
786 /* Sanity check */
787 ctxp->state = LPFC_NVME_STE_DONE;
788 ctxp->entry_cnt++;
789
790 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
791 if (ctxp->ts_cmd_nvme) {
792 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
793 ctxp->ts_isr_data =
794 cmdwqe->isr_timestamp;
795 ctxp->ts_data_nvme =
796 ktime_get_ns();
797 ctxp->ts_nvme_status =
798 ctxp->ts_data_nvme;
799 ctxp->ts_status_wqput =
800 ctxp->ts_data_nvme;
801 ctxp->ts_isr_status =
802 ctxp->ts_data_nvme;
803 ctxp->ts_status_nvme =
804 ctxp->ts_data_nvme;
805 } else {
806 ctxp->ts_isr_status =
807 cmdwqe->isr_timestamp;
808 ctxp->ts_status_nvme =
809 ktime_get_ns();
810 }
811 }
812 #endif
813 rsp->done(rsp);
814 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
815 if (ctxp->ts_cmd_nvme)
816 lpfc_nvmet_ktime(phba, ctxp);
817 #endif
818 /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
819 } else {
820 ctxp->entry_cnt++;
821 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
822 memset(((char *)cmdwqe) + start_clean, 0,
823 (sizeof(struct lpfc_iocbq) - start_clean));
824 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
825 if (ctxp->ts_cmd_nvme) {
826 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
827 ctxp->ts_data_nvme = ktime_get_ns();
828 }
829 #endif
830 rsp->done(rsp);
831 }
832 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
833 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
834 id = raw_smp_processor_id();
835 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
836 if (ctxp->cpu != id)
837 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
838 "6704 CPU Check cmdcmpl: "
839 "cpu %d expect %d\n",
840 id, ctxp->cpu);
841 }
842 #endif
843 }
844
845 /**
846 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit an NVME
847 * LS rsp for a prior NVME LS request that was received.
848 * @axchg: pointer to exchange context for the NVME LS request the response
849 * is for.
850 * @ls_rsp: pointer to the transport LS RSP that is to be sent
851 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
852 *
853 * This routine is used to format and send a WQE to transmit a NVME LS
854 * Response. The response is for a prior NVME LS request that was
855 * received and posted to the transport.
856 *
857 * Returns:
858 * 0 : if response successfully transmit
859 * non-zero : if response failed to transmit, of the form -Exxx.
860 **/
861 int
862 __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
863 struct nvmefc_ls_rsp *ls_rsp,
864 void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
865 struct lpfc_iocbq *cmdwqe,
866 struct lpfc_wcqe_complete *wcqe))
867 {
868 struct lpfc_hba *phba = axchg->phba;
869 struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
870 struct lpfc_iocbq *nvmewqeq;
871 struct lpfc_dmabuf dmabuf;
872 struct ulp_bde64 bpl;
873 int rc;
874
875 if (phba->pport->load_flag & FC_UNLOADING)
876 return -ENODEV;
877
878 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
879 "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
880
881 if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
883 "6412 NVMEx LS rsp state mismatch "
884 "oxid x%x: %d %d\n",
885 axchg->oxid, axchg->state, axchg->entry_cnt);
886 return -EALREADY;
887 }
888 axchg->state = LPFC_NVME_STE_LS_RSP;
889 axchg->entry_cnt++;
890
891 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
892 ls_rsp->rsplen);
893 if (nvmewqeq == NULL) {
894 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
895 "6150 NVMEx LS Drop Rsp x%x: Prep\n",
896 axchg->oxid);
897 rc = -ENOMEM;
898 goto out_free_buf;
899 }
900
901 /* Save numBdes for bpl2sgl */
902 nvmewqeq->rsvd2 = 1;
903 nvmewqeq->hba_wqidx = 0;
904 nvmewqeq->context3 = &dmabuf;
905 dmabuf.virt = &bpl;
906 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
907 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
908 bpl.tus.f.bdeSize = ls_rsp->rsplen;
909 bpl.tus.f.bdeFlags = 0;
910 bpl.tus.w = le32_to_cpu(bpl.tus.w);
911 /*
912 * Note: although we're using stack space for the dmabuf, the
913 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
914 * be referenced after it returns back to this routine.
915 */
916
917 nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
918 nvmewqeq->iocb_cmpl = NULL;
919 nvmewqeq->context2 = axchg;
920
921 lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
922 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
923
924 rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
925
926 /* clear to be sure there's no reference */
927 nvmewqeq->context3 = NULL;
928
929 if (rc == WQE_SUCCESS) {
930 /*
931 * Okay to repost buffer here, but wait till cmpl
932 * before freeing ctxp and iocbq.
933 */
934 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
935 return 0;
936 }
937
938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
939 "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
940 axchg->oxid, rc);
941
942 rc = -ENXIO;
943
944 lpfc_nlp_put(nvmewqeq->context1);
945
946 out_free_buf:
947 /* Give back resources */
948 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
949
950 /*
951 * As transport doesn't track completions of responses, if the rsp
952 * fails to send, the transport will effectively ignore the rsp
953 * and consider the LS done. However, the driver has an active
954 * exchange open for the LS - so be sure to abort the exchange
955 * if the response isn't sent.
956 */
957 lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
958 return rc;
959 }
960
961 /**
962 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
963 * @tgtport: pointer to target port that the NVME LS is to be transmitted from.
964 * @ls_rsp: pointer to the transport LS RSP that is to be sent
965 *
966 * Driver registers this routine to transmit responses for received NVME
967 * LS requests.
968 *
969 * This routine is used to format and send a WQE to transmit a NVME LS
970 * Response. The ls_rsp is used to reverse-map the LS to the original
971 * NVME LS request sequence, which provides addressing information for
972 * the remote port the LS is to be sent to, as well as the exchange id
973 * that the LS is bound to.
974 *
975 * Returns:
976 * 0 : if response successfully transmit
977 * non-zero : if response failed to transmit, of the form -Exxx.
978 **/
979 static int
980 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
981 struct nvmefc_ls_rsp *ls_rsp)
982 {
983 struct lpfc_async_xchg_ctx *axchg =
984 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
985 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
986 int rc;
987
988 if (axchg->phba->pport->load_flag & FC_UNLOADING)
989 return -ENODEV;
990
991 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
992
993 if (rc) {
994 atomic_inc(&nvmep->xmt_ls_drop);
995 /*
996 * unless the failure is due to having already sent
997 * the response, an abort will be generated for the
998 * exchange if the rsp can't be sent.
999 */
1000 if (rc != -EALREADY)
1001 atomic_inc(&nvmep->xmt_ls_abort);
1002 return rc;
1003 }
1004
1005 atomic_inc(&nvmep->xmt_ls_rsp);
1006 return 0;
1007 }
1008
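/* Transport fcp_op entry point: build and issue the TSEND, TRECEIVE or
 * TRSP WQE for the target IO described by @rsp. If the work queue is
 * full, the WQE is parked on the wqfull list and sent when WQE release
 * CQEs free up space.
 */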
1009 static int
1010 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1011 struct nvmefc_tgt_fcp_req *rsp)
1012 {
1013 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1014 struct lpfc_async_xchg_ctx *ctxp =
1015 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1016 struct lpfc_hba *phba = ctxp->phba;
1017 struct lpfc_queue *wq;
1018 struct lpfc_iocbq *nvmewqeq;
1019 struct lpfc_sli_ring *pring;
1020 unsigned long iflags;
1021 int rc;
1022 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1023 int id;
1024 #endif
1025
1026 if (phba->pport->load_flag & FC_UNLOADING) {
1027 rc = -ENODEV;
1028 goto aerr;
1029 }
1030
1031 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1032 if (ctxp->ts_cmd_nvme) {
1033 if (rsp->op == NVMET_FCOP_RSP)
1034 ctxp->ts_nvme_status = ktime_get_ns();
1035 else
1036 ctxp->ts_nvme_data = ktime_get_ns();
1037 }
1038
1039 /* Setup the hdw queue if not already set */
1040 if (!ctxp->hdwq)
1041 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1042
1043 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1044 id = raw_smp_processor_id();
1045 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1046 if (rsp->hwqid != id)
1047 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1048 "6705 CPU Check OP: "
1049 "cpu %d expect %d\n",
1050 id, rsp->hwqid);
1051 ctxp->cpu = id; /* Setup cpu for cmpl check */
1052 }
1053 #endif
1054
1055 /* Sanity check */
1056 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1057 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1058 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1059 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1060 "6102 IO oxid x%x aborted\n",
1061 ctxp->oxid);
1062 rc = -ENXIO;
1063 goto aerr;
1064 }
1065
1066 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1067 if (nvmewqeq == NULL) {
1068 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1070 "6152 FCP Drop IO x%x: Prep\n",
1071 ctxp->oxid);
1072 rc = -ENXIO;
1073 goto aerr;
1074 }
1075
1076 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1077 nvmewqeq->iocb_cmpl = NULL;
1078 nvmewqeq->context2 = ctxp;
1079 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
1080 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1081
1082 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1083 ctxp->oxid, rsp->op, rsp->rsplen);
1084
1085 ctxp->flag |= LPFC_NVME_IO_INP;
1086 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1087 if (rc == WQE_SUCCESS) {
1088 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1089 if (!ctxp->ts_cmd_nvme)
1090 return 0;
1091 if (rsp->op == NVMET_FCOP_RSP)
1092 ctxp->ts_status_wqput = ktime_get_ns();
1093 else
1094 ctxp->ts_data_wqput = ktime_get_ns();
1095 #endif
1096 return 0;
1097 }
1098
1099 if (rc == -EBUSY) {
1100 /*
1101 * WQ was full, so queue nvmewqeq to be sent after
1102 * WQE release CQE
1103 */
1104 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1105 wq = ctxp->hdwq->io_wq;
1106 pring = wq->pring;
1107 spin_lock_irqsave(&pring->ring_lock, iflags);
1108 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1109 wq->q_flag |= HBA_NVMET_WQFULL;
1110 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1111 atomic_inc(&lpfc_nvmep->defer_wqfull);
1112 return 0;
1113 }
1114
1115 /* Give back resources */
1116 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1118 "6153 FCP Drop IO x%x: Issue: %d\n",
1119 ctxp->oxid, rc);
1120
1121 ctxp->wqeq->hba_wqidx = 0;
1122 nvmewqeq->context2 = NULL;
1123 nvmewqeq->context3 = NULL;
1124 rc = -EBUSY;
1125 aerr:
1126 return rc;
1127 }
1128
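/* Transport callback invoked once the targetport has been deleted;
 * wakes any thread waiting for the unregister to complete.
 */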
1129 static void
1130 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1131 {
1132 struct lpfc_nvmet_tgtport *tport = targetport->private;
1133
1134 /* release any threads waiting for the unreg to complete */
1135 if (tport->phba->targetport)
1136 complete(tport->tport_unreg_cmp);
1137 }
1138
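/* Transport fcp_abort entry point: issue an ABTS for the exchange
 * backing @req unless the firmware is already aborting it. An IO still
 * deferred on a full WQ is aborted and its queued WQE is flushed.
 */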
1139 static void
1140 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1141 struct nvmefc_tgt_fcp_req *req)
1142 {
1143 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1144 struct lpfc_async_xchg_ctx *ctxp =
1145 container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1146 struct lpfc_hba *phba = ctxp->phba;
1147 struct lpfc_queue *wq;
1148 unsigned long flags;
1149
1150 if (phba->pport->load_flag & FC_UNLOADING)
1151 return;
1152
1153 if (!ctxp->hdwq)
1154 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1155
1156 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1157 "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1158 ctxp->oxid, ctxp->flag, ctxp->state);
1159
1160 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1161 ctxp->oxid, ctxp->flag, ctxp->state);
1162
1163 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1164
1165 spin_lock_irqsave(&ctxp->ctxlock, flags);
1166
1167 /* Since iaab/iaar are NOT set, we need to check
1168 * if the firmware is in process of aborting IO
1169 */
1170 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1171 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1172 return;
1173 }
1174 ctxp->flag |= LPFC_NVME_ABORT_OP;
1175
1176 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1177 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1178 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1179 ctxp->oxid);
1180 wq = ctxp->hdwq->io_wq;
1181 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1182 return;
1183 }
1184 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1185
1186 /* A state of LPFC_NVME_STE_RCV means we have just received
1187 * the NVME command and have not started processing it.
1188 * (by issuing any IO WQEs on this exchange yet)
1189 */
1190 if (ctxp->state == LPFC_NVME_STE_RCV)
1191 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1192 ctxp->oxid);
1193 else
1194 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1195 ctxp->oxid);
1196 }
1197
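/* Transport fcp_req_release entry point: the transport is finished with
 * the IO, so recycle the exchange context. If an abort is outstanding or
 * the exchange is still busy in hardware, defer the release to the
 * abort/XRI-aborted completion path.
 */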
1198 static void
1199 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1200 struct nvmefc_tgt_fcp_req *rsp)
1201 {
1202 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1203 struct lpfc_async_xchg_ctx *ctxp =
1204 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1205 struct lpfc_hba *phba = ctxp->phba;
1206 unsigned long flags;
1207 bool aborting = false;
1208
1209 spin_lock_irqsave(&ctxp->ctxlock, flags);
1210 if (ctxp->flag & LPFC_NVME_XBUSY)
1211 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1212 "6027 NVMET release with XBUSY flag x%x"
1213 " oxid x%x\n",
1214 ctxp->flag, ctxp->oxid);
1215 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1216 ctxp->state != LPFC_NVME_STE_ABORT)
1217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1218 "6413 NVMET release bad state %d %d oxid x%x\n",
1219 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1220
1221 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1222 (ctxp->flag & LPFC_NVME_XBUSY)) {
1223 aborting = true;
1224 /* let the abort path do the real release */
1225 lpfc_nvmet_defer_release(phba, ctxp);
1226 }
1227 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1228
1229 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1230 ctxp->state, aborting);
1231
1232 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1233 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1234
1235 if (aborting)
1236 return;
1237
1238 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1239 }
1240
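/* Transport defer_rcv entry point: the transport has finished with a
 * command whose receive buffer was held back; free that buffer now,
 * since a replacement buffer has already been reposted to the RQ.
 */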
1241 static void
1242 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1243 struct nvmefc_tgt_fcp_req *rsp)
1244 {
1245 struct lpfc_nvmet_tgtport *tgtp;
1246 struct lpfc_async_xchg_ctx *ctxp =
1247 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1248 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1249 struct lpfc_hba *phba = ctxp->phba;
1250 unsigned long iflag;
1251
1252
1253 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1254 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1255
1256 if (!nvmebuf) {
1257 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1258 "6425 Defer rcv: no buffer oxid x%x: "
1259 "flg %x ste %x\n",
1260 ctxp->oxid, ctxp->flag, ctxp->state);
1261 return;
1262 }
1263
1264 tgtp = phba->targetport->private;
1265 if (tgtp)
1266 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1267
1268 /* Free the nvmebuf since a new buffer already replaced it */
1269 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1270 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1271 ctxp->rqb_buffer = NULL;
1272 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1273 }
1274
1275 /**
1276 * lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
1277 * @phba: Pointer to HBA context object
1278 * @cmdwqe: Pointer to driver command WQE object.
1279 * @wcqe: Pointer to driver response CQE object.
1280 *
1281 * This function is the completion handler for NVME LS requests.
1282 * The function updates any states and statistics, then calls the
1283 * generic completion handler to finish completion of the request.
1284 **/
1285 static void
1286 lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1287 struct lpfc_wcqe_complete *wcqe)
1288 {
1289 __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1290 }
1291
1292 /**
1293 * lpfc_nvmet_ls_req - Issue a Link Service request
1294 * @targetport: pointer to target instance registered with nvmet transport.
1295 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1296 * Driver sets this value to the ndlp pointer.
1297 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
1298 *
1299 * Driver registers this routine to handle any link service request
1300 * from the nvme_fc transport to a remote nvme-aware port.
1301 *
1302 * Return value :
1303 * 0 - Success
1304 * non-zero: various error codes, in form of -Exxx
1305 **/
1306 static int
1307 lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1308 void *hosthandle,
1309 struct nvmefc_ls_req *pnvme_lsreq)
1310 {
1311 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1312 struct lpfc_hba *phba;
1313 struct lpfc_nodelist *ndlp;
1314 int ret;
1315 u32 hstate;
1316
1317 if (!lpfc_nvmet)
1318 return -EINVAL;
1319
1320 phba = lpfc_nvmet->phba;
1321 if (phba->pport->load_flag & FC_UNLOADING)
1322 return -EINVAL;
1323
1324 hstate = atomic_read(&lpfc_nvmet->state);
1325 if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1326 return -EACCES;
1327
1328 ndlp = (struct lpfc_nodelist *)hosthandle;
1329
1330 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1331 lpfc_nvmet_ls_req_cmp);
1332
1333 return ret;
1334 }
1335
1336 /**
1337 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
1338 * @targetport: Transport targetport, that LS was issued from.
1339 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
1340 * Driver sets this value to the ndlp pointer.
1341 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS to be aborted
1342 *
1343 * Driver registers this routine to abort an NVME LS request that is
1344 * in progress (from the transport's perspective).
1345 **/
1346 static void
1347 lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1348 void *hosthandle,
1349 struct nvmefc_ls_req *pnvme_lsreq)
1350 {
1351 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1352 struct lpfc_hba *phba;
1353 struct lpfc_nodelist *ndlp;
1354 int ret;
1355
1356 phba = lpfc_nvmet->phba;
1357 if (phba->pport->load_flag & FC_UNLOADING)
1358 return;
1359
1360 ndlp = (struct lpfc_nodelist *)hosthandle;
1361
1362 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1363 if (!ret)
1364 atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1365 }
1366
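/* Transport host_release entry point: the transport has dropped its
 * reference on the hosthandle (ndlp); reset the tgtport state.
 */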
1367 static void
1368 lpfc_nvmet_host_release(void *hosthandle)
1369 {
1370 struct lpfc_nodelist *ndlp = hosthandle;
1371 struct lpfc_hba *phba = NULL;
1372 struct lpfc_nvmet_tgtport *tgtp;
1373
1374 phba = ndlp->phba;
1375 if (!phba->targetport || !phba->targetport->private)
1376 return;
1377
1378 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1379 "6202 NVMET XPT releasing hosthandle x%px\n",
1380 hosthandle);
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 atomic_set(&tgtp->state, 0);
1383 }
1384
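/* Transport discovery_event entry point: an NVME subsystem change has
 * occurred, so issue an RSCN to notify the fabric.
 */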
1385 static void
1386 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1387 {
1388 struct lpfc_nvmet_tgtport *tgtp;
1389 struct lpfc_hba *phba;
1390 uint32_t rc;
1391
1392 tgtp = tgtport->private;
1393 phba = tgtp->phba;
1394
1395 rc = lpfc_issue_els_rscn(phba->pport, 0);
1396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1397 "6420 NVMET subsystem change: Notification %s\n",
1398 (rc) ? "Failed" : "Sent");
1399 }
1400
1401 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1402 .targetport_delete = lpfc_nvmet_targetport_delete,
1403 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1404 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1405 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1406 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1407 .defer_rcv = lpfc_nvmet_defer_rcv,
1408 .discovery_event = lpfc_nvmet_discovery_event,
1409 .ls_req = lpfc_nvmet_ls_req,
1410 .ls_abort = lpfc_nvmet_ls_abort,
1411 .host_release = lpfc_nvmet_host_release,
1412
1413 .max_hw_queues = 1,
1414 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1415 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1416 .dma_boundary = 0xFFFFFFFF,
1417
1418 /* optional features */
1419 .target_features = 0,
1420 /* sizes of additional private data for data structures */
1421 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1422 .lsrqst_priv_sz = 0,
1423 };
1424
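/* Release every ctx buffer on one per-CPU/MRQ context list: return each
 * SGL to the nvmet SGL list, release the iocbq and free the context.
 */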
1425 static void
1426 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1427 struct lpfc_nvmet_ctx_info *infop)
1428 {
1429 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1430 unsigned long flags;
1431
1432 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1433 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1434 &infop->nvmet_ctx_list, list) {
1435 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1436 list_del_init(&ctx_buf->list);
1437 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1438
1439 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1440 ctx_buf->sglq->state = SGL_FREED;
1441 ctx_buf->sglq->ndlp = NULL;
1442
1443 spin_lock(&phba->sli4_hba.sgl_list_lock);
1444 list_add_tail(&ctx_buf->sglq->list,
1445 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1446 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1447
1448 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1449 kfree(ctx_buf->context);
1450 }
1451 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1452 }
1453
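/* Free all NVMET IO context resources across every MRQ/CPU context
 * list, then free the context info array itself.
 */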
1454 static void
1455 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1456 {
1457 struct lpfc_nvmet_ctx_info *infop;
1458 int i, j;
1459
1460 /* The first context list, MRQ 0 CPU 0 */
1461 infop = phba->sli4_hba.nvmet_ctx_info;
1462 if (!infop)
1463 return;
1464
1465 /* Cycle the entire CPU context list for every MRQ */
1466 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1467 for_each_present_cpu(j) {
1468 infop = lpfc_get_ctx_list(phba, j, i);
1469 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1470 }
1471 }
1472 kfree(phba->sli4_hba.nvmet_ctx_info);
1473 phba->sli4_hba.nvmet_ctx_info = NULL;
1474 }
1475
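/* Allocate the per-XRI resources needed to receive NVMET commands: a
 * ctx buffer, exchange context, iocbq and SGL for every nvmet XRI,
 * distributed across the per-CPU/per-MRQ context lists described below.
 */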
1476 static int
1477 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1478 {
1479 struct lpfc_nvmet_ctxbuf *ctx_buf;
1480 struct lpfc_iocbq *nvmewqe;
1481 union lpfc_wqe128 *wqe;
1482 struct lpfc_nvmet_ctx_info *last_infop;
1483 struct lpfc_nvmet_ctx_info *infop;
1484 int i, j, idx, cpu;
1485
1486 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1487 "6403 Allocate NVMET resources for %d XRIs\n",
1488 phba->sli4_hba.nvmet_xri_cnt);
1489
1490 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1491 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1492 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1493 if (!phba->sli4_hba.nvmet_ctx_info) {
1494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1495 "6419 Failed allocate memory for "
1496 "nvmet context lists\n");
1497 return -ENOMEM;
1498 }
1499
1500 /*
1501 * Assuming X CPUs in the system, and Y MRQs, allocate some
1502 * lpfc_nvmet_ctx_info structures as follows:
1503 *
1504 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1505 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1506 * ...
1507 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1508 *
1509 * Each line represents a MRQ "silo" containing an entry for
1510 * every CPU.
1511 *
1512 * MRQ X is initially assumed to be associated with CPU X, thus
1513 * contexts are initially distributed across all MRQs using
1514 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1515 * freed, they are freed to the MRQ silo based on the CPU number
1516 * of the IO completion. Thus a context that was allocated for MRQ A
1517 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1518 */
1519 for_each_possible_cpu(i) {
1520 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1521 infop = lpfc_get_ctx_list(phba, i, j);
1522 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1523 spin_lock_init(&infop->nvmet_ctx_list_lock);
1524 infop->nvmet_ctx_list_cnt = 0;
1525 }
1526 }
1527
1528 /*
1529 * Setup the next CPU context info ptr for each MRQ.
1530 * MRQ 0 will cycle thru CPUs 0 - X separately from
1531 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1532 */
1533 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1534 last_infop = lpfc_get_ctx_list(phba,
1535 cpumask_first(cpu_present_mask),
1536 j);
1537 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
1538 infop = lpfc_get_ctx_list(phba, i, j);
1539 infop->nvmet_ctx_next_cpu = last_infop;
1540 last_infop = infop;
1541 }
1542 }
1543
1544 /* For all nvmet xris, allocate resources needed to process a
1545 * received command on a per xri basis.
1546 */
1547 idx = 0;
1548 cpu = cpumask_first(cpu_present_mask);
1549 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1550 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1551 if (!ctx_buf) {
1552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1553 "6404 Ran out of memory for NVMET\n");
1554 return -ENOMEM;
1555 }
1556
1557 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1558 GFP_KERNEL);
1559 if (!ctx_buf->context) {
1560 kfree(ctx_buf);
1561 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1562 "6405 Ran out of NVMET "
1563 "context memory\n");
1564 return -ENOMEM;
1565 }
1566 ctx_buf->context->ctxbuf = ctx_buf;
1567 ctx_buf->context->state = LPFC_NVME_STE_FREE;
1568
1569 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1570 if (!ctx_buf->iocbq) {
1571 kfree(ctx_buf->context);
1572 kfree(ctx_buf);
1573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1574 "6406 Ran out of NVMET iocb/WQEs\n");
1575 return -ENOMEM;
1576 }
1577 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1578 nvmewqe = ctx_buf->iocbq;
1579 wqe = &nvmewqe->wqe;
1580
1581 /* Initialize WQE */
1582 memset(wqe, 0, sizeof(union lpfc_wqe));
1583
1584 ctx_buf->iocbq->context1 = NULL;
1585 spin_lock(&phba->sli4_hba.sgl_list_lock);
1586 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1587 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1588 if (!ctx_buf->sglq) {
1589 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1590 kfree(ctx_buf->context);
1591 kfree(ctx_buf);
1592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1593 "6407 Ran out of NVMET XRIs\n");
1594 return -ENOMEM;
1595 }
1596 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1597
1598 /*
1599 * Add ctx to MRQidx context list. Our initial assumption
1600 * is MRQidx will be associated with CPUidx. This association
1601 * can change on the fly.
1602 */
1603 infop = lpfc_get_ctx_list(phba, cpu, idx);
1604 spin_lock(&infop->nvmet_ctx_list_lock);
1605 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1606 infop->nvmet_ctx_list_cnt++;
1607 spin_unlock(&infop->nvmet_ctx_list_lock);
1608
1609 /* Spread ctx structures evenly across all MRQs */
1610 idx++;
1611 if (idx >= phba->cfg_nvmet_mrq) {
1612 idx = 0;
1613 cpu = cpumask_first(cpu_present_mask);
1614 continue;
1615 }
1616 cpu = cpumask_next(cpu, cpu_present_mask);
1617 if (cpu == nr_cpu_ids)
1618 cpu = cpumask_first(cpu_present_mask);
1619
1620 }
1621
1622 for_each_present_cpu(i) {
1623 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1624 infop = lpfc_get_ctx_list(phba, i, j);
1625 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1626 "6408 TOTAL NVMET ctx for CPU %d "
1627 "MRQ %d: cnt %d nextcpu x%px\n",
1628 i, j, infop->nvmet_ctx_list_cnt,
1629 infop->nvmet_ctx_next_cpu);
1630 }
1631 }
1632 return 0;
1633 }
1634
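/* Set up the NVMET IO contexts and register this port with the nvmet-fc
 * transport as an NVME target port. If registration fails, the IO
 * contexts are cleaned up and nvmet_support is cleared.
 */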
1635 int
1636 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1637 {
1638 struct lpfc_vport *vport = phba->pport;
1639 struct lpfc_nvmet_tgtport *tgtp;
1640 struct nvmet_fc_port_info pinfo;
1641 int error;
1642
1643 if (phba->targetport)
1644 return 0;
1645
1646 error = lpfc_nvmet_setup_io_context(phba);
1647 if (error)
1648 return error;
1649
1650 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1651 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1652 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1653 pinfo.port_id = vport->fc_myDID;
1654
1655 /* We need to tell the transport layer + 1 because it takes page
1656 * alignment into account. When space for the SGL is allocated we
1657 * allocate + 3, one for cmd, one for rsp and one for this alignment
1658 */
1659 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1660 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1661 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1662
1663 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1664 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1665 &phba->pcidev->dev,
1666 &phba->targetport);
1667 #else
1668 error = -ENOENT;
1669 #endif
1670 if (error) {
1671 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1672 "6025 Cannot register NVME targetport x%x: "
1673 "portnm %llx nodenm %llx segs %d qs %d\n",
1674 error,
1675 pinfo.port_name, pinfo.node_name,
1676 lpfc_tgttemplate.max_sgl_segments,
1677 lpfc_tgttemplate.max_hw_queues);
1678 phba->targetport = NULL;
1679 phba->nvmet_support = 0;
1680
1681 lpfc_nvmet_cleanup_io_context(phba);
1682
1683 } else {
1684 tgtp = (struct lpfc_nvmet_tgtport *)
1685 phba->targetport->private;
1686 tgtp->phba = phba;
1687
1688 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1689 "6026 Registered NVME "
1690 "targetport: x%px, private x%px "
1691 "portnm %llx nodenm %llx segs %d qs %d\n",
1692 phba->targetport, tgtp,
1693 pinfo.port_name, pinfo.node_name,
1694 lpfc_tgttemplate.max_sgl_segments,
1695 lpfc_tgttemplate.max_hw_queues);
1696
1697 atomic_set(&tgtp->rcv_ls_req_in, 0);
1698 atomic_set(&tgtp->rcv_ls_req_out, 0);
1699 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1700 atomic_set(&tgtp->xmt_ls_abort, 0);
1701 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1702 atomic_set(&tgtp->xmt_ls_rsp, 0);
1703 atomic_set(&tgtp->xmt_ls_drop, 0);
1704 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1705 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1706 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1707 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1708 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1709 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1710 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1711 atomic_set(&tgtp->xmt_fcp_drop, 0);
1712 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1713 atomic_set(&tgtp->xmt_fcp_read, 0);
1714 atomic_set(&tgtp->xmt_fcp_write, 0);
1715 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1716 atomic_set(&tgtp->xmt_fcp_release, 0);
1717 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1718 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1719 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1720 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1721 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1722 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1723 atomic_set(&tgtp->xmt_fcp_abort, 0);
1724 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1725 atomic_set(&tgtp->xmt_abort_unsol, 0);
1726 atomic_set(&tgtp->xmt_abort_sol, 0);
1727 atomic_set(&tgtp->xmt_abort_rsp, 0);
1728 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1729 atomic_set(&tgtp->defer_ctx, 0);
1730 atomic_set(&tgtp->defer_fod, 0);
1731 atomic_set(&tgtp->defer_wqfull, 0);
1732 }
1733 return error;
1734 }
1735
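/**
* lpfc_nvmet_update_targetport - Refresh the targetport's port_id
* @phba: pointer to lpfc hba data structure.
*
* Copies the physical port's current fc_myDID into the registered
* targetport so the nvmet-fc transport sees the DID currently assigned
* by the fabric.
**/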
1736 int
1737 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1738 {
1739 struct lpfc_vport *vport = phba->pport;
1740
1741 if (!phba->targetport)
1742 return 0;
1743
1744 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1745 "6007 Update NVMET port x%px did x%x\n",
1746 phba->targetport, vport->fc_myDID);
1747
1748 phba->targetport->port_id = vport->fc_myDID;
1749 return 0;
1750 }
1751
1752 /**
1753 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1754 * @phba: pointer to lpfc hba data structure.
1755 * @axri: pointer to the nvmet xri abort wcqe structure.
1756 *
1757 * This routine is invoked by the worker thread to process a SLI4 fast-path
1758 * NVMET aborted xri.
1759 **/
1760 void
1761 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1762 struct sli4_wcqe_xri_aborted *axri)
1763 {
1764 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1765 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1766 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1767 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1768 struct lpfc_nvmet_tgtport *tgtp;
1769 struct nvmefc_tgt_fcp_req *req = NULL;
1770 struct lpfc_nodelist *ndlp;
1771 unsigned long iflag = 0;
1772 int rrq_empty = 0;
1773 bool released = false;
1774
1775 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1776 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1777
1778 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1779 return;
1780
1781 if (phba->targetport) {
1782 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1783 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1784 }
1785
1786 spin_lock_irqsave(&phba->hbalock, iflag);
1787 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1788 list_for_each_entry_safe(ctxp, next_ctxp,
1789 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1790 list) {
1791 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1792 continue;
1793
1794 spin_lock(&ctxp->ctxlock);
1795 /* Check if we already received a free context call
1796 * and we have completed processing an abort situation.
1797 */
1798 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1799 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1800 list_del_init(&ctxp->list);
1801 released = true;
1802 }
1803 ctxp->flag &= ~LPFC_NVME_XBUSY;
1804 spin_unlock(&ctxp->ctxlock);
1805 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1806
1807 rrq_empty = list_empty(&phba->active_rrq_list);
1808 spin_unlock_irqrestore(&phba->hbalock, iflag);
1809 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1810 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1811 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1812 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1813 lpfc_set_rrq_active(phba, ndlp,
1814 ctxp->ctxbuf->sglq->sli4_lxritag,
1815 rxid, 1);
1816 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1817 }
1818
1819 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1820 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1821 ctxp->oxid, ctxp->flag, released);
1822 if (released)
1823 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1824
1825 if (rrq_empty)
1826 lpfc_worker_wake_up(phba);
1827 return;
1828 }
1829 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1830 spin_unlock_irqrestore(&phba->hbalock, iflag);
1831
1832 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1833 if (ctxp) {
1834 /*
1835 * Abort already done by FW, so BA_ACC sent.
1836 * However, the transport may be unaware.
1837 */
1838 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1839 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1840 "flag x%x oxid x%x rxid x%x\n",
1841 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1842 rxid);
1843
1844 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1845 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1846 ctxp->state = LPFC_NVME_STE_ABORT;
1847 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1848
1849 lpfc_nvmeio_data(phba,
1850 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1851 xri, raw_smp_processor_id(), 0);
1852
1853 req = &ctxp->hdlrctx.fcp_req;
1854 if (req)
1855 nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1856 }
1857 #endif
1858 }
1859
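/**
* lpfc_nvmet_rcv_unsol_abort - Process an ABTS received from an initiator
* @vport: pointer to a host virtual N_Port data structure.
* @fc_hdr: pointer to the FC frame header of the received ABTS.
*
* Matches the ABTS by OX_ID and S_ID against the aborted-exchange list,
* the io wait list and the active exchange list. A matching exchange is
* reported to the nvmet-fc transport and the ABTS is answered with a
* BA_ACC; if no match is found a BA_RJT is sent instead.
**/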
1860 int
1861 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1862 struct fc_frame_header *fc_hdr)
1863 {
1864 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1865 struct lpfc_hba *phba = vport->phba;
1866 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1867 struct nvmefc_tgt_fcp_req *rsp;
1868 uint32_t sid;
1869 uint16_t oxid, xri;
1870 unsigned long iflag = 0;
1871
1872 sid = sli4_sid_from_fc_hdr(fc_hdr);
1873 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1874
1875 spin_lock_irqsave(&phba->hbalock, iflag);
1876 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1877 list_for_each_entry_safe(ctxp, next_ctxp,
1878 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1879 list) {
1880 if (ctxp->oxid != oxid || ctxp->sid != sid)
1881 continue;
1882
1883 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1884
1885 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1886 spin_unlock_irqrestore(&phba->hbalock, iflag);
1887
1888 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1889 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1890 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1891
1892 lpfc_nvmeio_data(phba,
1893 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1894 xri, raw_smp_processor_id(), 0);
1895
1896 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1897 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1898
1899 rsp = &ctxp->hdlrctx.fcp_req;
1900 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1901
1902 /* Respond with BA_ACC accordingly */
1903 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1904 return 0;
1905 }
1906 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1907 spin_unlock_irqrestore(&phba->hbalock, iflag);
1908
1909 /* check the wait list */
1910 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1911 struct rqb_dmabuf *nvmebuf;
1912 struct fc_frame_header *fc_hdr_tmp;
1913 u32 sid_tmp;
1914 u16 oxid_tmp;
1915 bool found = false;
1916
1917 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1918
1919 /* match by oxid and s_id */
1920 list_for_each_entry(nvmebuf,
1921 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1922 hbuf.list) {
1923 fc_hdr_tmp = (struct fc_frame_header *)
1924 (nvmebuf->hbuf.virt);
1925 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1926 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1927 if (oxid_tmp != oxid || sid_tmp != sid)
1928 continue;
1929
1930 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1931 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1932 "is waiting for a ctxp\n",
1933 oxid, sid);
1934
1935 list_del_init(&nvmebuf->hbuf.list);
1936 phba->sli4_hba.nvmet_io_wait_cnt--;
1937 found = true;
1938 break;
1939 }
1940 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1941 iflag);
1942
1943 /* free buffer since already posted a new DMA buffer to RQ */
1944 if (found) {
1945 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1946 /* Respond with BA_ACC accordingly */
1947 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1948 return 0;
1949 }
1950 }
1951
1952 /* check active list */
1953 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954 if (ctxp) {
1955 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1956
1957 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1960
1961 lpfc_nvmeio_data(phba,
1962 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1963 xri, raw_smp_processor_id(), 0);
1964
1965 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1966 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1967 "flag x%x state x%x\n",
1968 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1969
1970 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1971 /* Notify the transport */
1972 nvmet_fc_rcv_fcp_abort(phba->targetport,
1973 &ctxp->hdlrctx.fcp_req);
1974 } else {
1975 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977 lpfc_nvmet_defer_release(phba, ctxp);
1978 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1979 }
1980 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981 ctxp->oxid);
1982
1983 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1984 return 0;
1985 }
1986
1987 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1988 oxid, raw_smp_processor_id(), 1);
1989
1990 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1991 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1992
1993 /* Respond with BA_RJT accordingly */
1994 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1995 #endif
1996 return 0;
1997 }
1998
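/**
* lpfc_nvmet_wqfull_flush - Flush deferred WQEs from a WQ's wqfull_list
* @phba: pointer to lpfc hba data structure.
* @wq: pointer to the work queue whose wqfull_list is to be flushed.
* @ctxp: exchange context to flush, or NULL to flush every deferred WQE.
*
* Completes the deferred WQE(s) through lpfc_nvmet_xmt_fcp_op_cmp() with a
* faked IOSTAT_LOCAL_REJECT / IOERR_ABORT_REQUESTED status. When the whole
* list is flushed, the HBA_NVMET_WQFULL flag is cleared on the queue.
**/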
1999 static void
2000 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2001 struct lpfc_async_xchg_ctx *ctxp)
2002 {
2003 struct lpfc_sli_ring *pring;
2004 struct lpfc_iocbq *nvmewqeq;
2005 struct lpfc_iocbq *next_nvmewqeq;
2006 unsigned long iflags;
2007 struct lpfc_wcqe_complete wcqe;
2008 struct lpfc_wcqe_complete *wcqep;
2009
2010 pring = wq->pring;
2011 wcqep = &wcqe;
2012
2013 /* Fake an ABORT error code back to cmpl routine */
2014 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2015 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2016 wcqep->parameter = IOERR_ABORT_REQUESTED;
2017
2018 spin_lock_irqsave(&pring->ring_lock, iflags);
2019 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2020 &wq->wqfull_list, list) {
2021 if (ctxp) {
2022 /* Checking for a specific IO to flush */
2023 if (nvmewqeq->context2 == ctxp) {
2024 list_del(&nvmewqeq->list);
2025 spin_unlock_irqrestore(&pring->ring_lock,
2026 iflags);
2027 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2028 wcqep);
2029 return;
2030 }
2031 continue;
2032 } else {
2033 /* Flush all IOs */
2034 list_del(&nvmewqeq->list);
2035 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2036 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
2037 spin_lock_irqsave(&pring->ring_lock, iflags);
2038 }
2039 }
2040 if (!ctxp)
2041 wq->q_flag &= ~HBA_NVMET_WQFULL;
2042 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2043 }
2044
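/**
* lpfc_nvmet_wqfull_process - Reissue WQEs deferred because the WQ was full
* @phba: pointer to lpfc hba data structure.
* @wq: pointer to the work queue with entries on its wqfull_list.
*
* Called when WQE slots become available again. Each deferred WQE is
* resubmitted via lpfc_sli4_issue_wqe(); if the WQ fills up again the WQE
* is put back on the list, otherwise the list is drained and the
* HBA_NVMET_WQFULL flag is cleared.
**/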
2045 void
2046 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2047 struct lpfc_queue *wq)
2048 {
2049 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050 struct lpfc_sli_ring *pring;
2051 struct lpfc_iocbq *nvmewqeq;
2052 struct lpfc_async_xchg_ctx *ctxp;
2053 unsigned long iflags;
2054 int rc;
2055
2056 /*
2057 * Some WQE slots are available, so try to re-issue anything
2058 * on the WQ wqfull_list.
2059 */
2060 pring = wq->pring;
2061 spin_lock_irqsave(&pring->ring_lock, iflags);
2062 while (!list_empty(&wq->wqfull_list)) {
2063 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2064 list);
2065 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2066 ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2068 spin_lock_irqsave(&pring->ring_lock, iflags);
2069 if (rc == -EBUSY) {
2070 /* WQ was full again, so put it back on the list */
2071 list_add(&nvmewqeq->list, &wq->wqfull_list);
2072 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2073 return;
2074 }
2075 if (rc == WQE_SUCCESS) {
2076 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2077 if (ctxp->ts_cmd_nvme) {
2078 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079 ctxp->ts_status_wqput = ktime_get_ns();
2080 else
2081 ctxp->ts_data_wqput = ktime_get_ns();
2082 }
2083 #endif
2084 } else {
2085 WARN_ON(rc);
2086 }
2087 }
2088 wq->q_flag &= ~HBA_NVMET_WQFULL;
2089 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2090
2091 #endif
2092 }
2093
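/**
* lpfc_nvmet_destroy_targetport - Unregister the NVMET target port
* @phba: pointer to lpfc hba data structure.
*
* Flushes any WQEs still deferred on the hardware queues, unregisters the
* targetport from the nvmet-fc transport, waits (with a timeout) for the
* unregistration to complete, and then releases the NVMET io contexts.
**/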
2094 void
2095 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2096 {
2097 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2098 struct lpfc_nvmet_tgtport *tgtp;
2099 struct lpfc_queue *wq;
2100 uint32_t qidx;
2101 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2102
2103 if (phba->nvmet_support == 0)
2104 return;
2105 if (phba->targetport) {
2106 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2107 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2108 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2109 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2110 }
2111 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2112 nvmet_fc_unregister_targetport(phba->targetport);
2113 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2114 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2116 "6179 Unreg targetport x%px timeout "
2117 "reached.\n", phba->targetport);
2118 lpfc_nvmet_cleanup_io_context(phba);
2119 }
2120 phba->targetport = NULL;
2121 #endif
2122 }
2123
2124 /**
2125 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
2126 * @phba: pointer to lpfc hba data structure.
2127 * @axchg: pointer to exchange context for the NVME LS request
2128 *
2129 * This routine is used for processing an asynchronously received NVME LS
2130 * request. Any remaining validation is done and the LS is then forwarded
2131 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
2132 *
2133 * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
2134 * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
2135 * lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
2136 *
2137 * Returns 0 if LS was handled and delivered to the transport
2138 * Returns 1 if LS failed to be handled and should be dropped
2139 */
2140 int
2141 lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2142 struct lpfc_async_xchg_ctx *axchg)
2143 {
2144 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2145 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2146 uint32_t *payload = axchg->payload;
2147 int rc;
2148
2149 atomic_inc(&tgtp->rcv_ls_req_in);
2150
2151 /*
2152 * Driver passes the ndlp as the hosthandle argument allowing
2153 * the transport to generate LS requests for any associations
2154 * that are created.
2155 */
2156 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2157 axchg->payload, axchg->size);
2158
2159 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2160 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2161 "%08x %08x %08x\n", axchg->size, rc,
2162 *payload, *(payload+1), *(payload+2),
2163 *(payload+3), *(payload+4), *(payload+5));
2164
2165 if (!rc) {
2166 atomic_inc(&tgtp->rcv_ls_req_out);
2167 return 0;
2168 }
2169
2170 atomic_inc(&tgtp->rcv_ls_req_drop);
2171 #endif
2172 return 1;
2173 }
2174
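/**
* lpfc_nvmet_process_rcv_fcp_req - Deliver a received FCP command to nvmet-fc
* @ctx_buf: pointer to the receive context buffer holding the exchange.
*
* Hands the command IU to nvmet_fc_rcv_fcp_req(). On success the receive
* buffer is returned to the RQ; on -EOVERFLOW the buffer is held until the
* transport's defer_rcv callback and a replacement DMA buffer is posted;
* on any other error the IO is dropped and an abort is issued for the
* exchange.
**/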
2175 static void
2176 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2177 {
2178 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2179 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
2180 struct lpfc_hba *phba = ctxp->phba;
2181 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2182 struct lpfc_nvmet_tgtport *tgtp;
2183 uint32_t *payload, qno;
2184 uint32_t rc;
2185 unsigned long iflags;
2186
2187 if (!nvmebuf) {
2188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2189 "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2190 "oxid: x%x flg: x%x state: x%x\n",
2191 ctxp->oxid, ctxp->flag, ctxp->state);
2192 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2193 lpfc_nvmet_defer_release(phba, ctxp);
2194 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2195 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2196 ctxp->oxid);
2197 return;
2198 }
2199
2200 if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
2201 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2202 "6324 IO oxid x%x aborted\n",
2203 ctxp->oxid);
2204 return;
2205 }
2206
2207 payload = (uint32_t *)(nvmebuf->dbuf.virt);
2208 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2209 ctxp->flag |= LPFC_NVME_TNOTIFY;
2210 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2211 if (ctxp->ts_isr_cmd)
2212 ctxp->ts_cmd_nvme = ktime_get_ns();
2213 #endif
2214 /*
2215 * The calling sequence should be:
2216 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done.
2217 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2218 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
2219 * in the NVME command / FC header has been stored.
2220 * A buffer has already been reposted for this IO, so just free
2221 * the nvmebuf.
2222 */
2223 rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
2224 payload, ctxp->size);
2225 /* Process FCP command */
2226 if (rc == 0) {
2227 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2228 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2229 if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
2230 (nvmebuf != ctxp->rqb_buffer)) {
2231 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2232 return;
2233 }
2234 ctxp->rqb_buffer = NULL;
2235 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2236 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2237 return;
2238 }
2239
2240 /* Processing of FCP command is deferred */
2241 if (rc == -EOVERFLOW) {
2242 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2243 "from %06x\n",
2244 ctxp->oxid, ctxp->size, ctxp->sid);
2245 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2246 atomic_inc(&tgtp->defer_fod);
2247 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2248 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
2249 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2250 return;
2251 }
2252 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2253 /*
2254 * Post a replacement DMA buffer to RQ and defer
2255 * freeing rcv buffer till .defer_rcv callback
2256 */
2257 qno = nvmebuf->idx;
2258 lpfc_post_rq_buffer(
2259 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2260 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2261 return;
2262 }
2263 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
2264 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2265 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2266 "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2267 ctxp->oxid, rc,
2268 atomic_read(&tgtp->rcv_fcp_cmd_in),
2269 atomic_read(&tgtp->rcv_fcp_cmd_out),
2270 atomic_read(&tgtp->xmt_fcp_release));
2271 lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2272 ctxp->oxid, ctxp->size, ctxp->sid);
2273 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2274 lpfc_nvmet_defer_release(phba, ctxp);
2275 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2276 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2277 #endif
2278 }
2279
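/**
* lpfc_nvmet_fcp_rqst_defer_work - Worker for deferred FCP command delivery
* @work: pointer to the work_struct embedded in the receive context buffer.
*
* Runs lpfc_nvmet_process_rcv_fcp_req() from the driver workqueue for
* commands whose delivery was deferred off the CQ handler.
**/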
2280 static void
2281 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2282 {
2283 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2284 struct lpfc_nvmet_ctxbuf *ctx_buf =
2285 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2286
2287 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2288 #endif
2289 }
2290
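/**
* lpfc_nvmet_replenish_context - Refill an empty MRQ context list
* @phba: pointer to lpfc hba data structure.
* @current_infop: context list info for the MRQ/CPU that ran empty.
*
* Walks the per-CPU context lists for this MRQ, starting with the CPU that
* had free contexts on the previous replenish, and splices the first
* non-empty list onto @current_infop. Returns one context buffer from the
* replenished list, or NULL if every list is empty (all contexts in flight).
**/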
2291 static struct lpfc_nvmet_ctxbuf *
2292 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2293 struct lpfc_nvmet_ctx_info *current_infop)
2294 {
2295 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2296 struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2297 struct lpfc_nvmet_ctx_info *get_infop;
2298 int i;
2299
2300 /*
2301 * The current_infop for the MRQ a NVME command IU was received
2302 * on is empty. Our goal is to replenish this MRQ's context
2303 * list from another CPU.
2304 *
2305 * First we need to pick a context list to start looking on.
2306 * nvmet_ctx_start_cpu had available context the last time
2307 * we needed to replenish this CPU, while nvmet_ctx_next_cpu
2308 * is just the next sequential CPU for this MRQ.
2309 */
2310 if (current_infop->nvmet_ctx_start_cpu)
2311 get_infop = current_infop->nvmet_ctx_start_cpu;
2312 else
2313 get_infop = current_infop->nvmet_ctx_next_cpu;
2314
2315 for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2316 if (get_infop == current_infop) {
2317 get_infop = get_infop->nvmet_ctx_next_cpu;
2318 continue;
2319 }
2320 spin_lock(&get_infop->nvmet_ctx_list_lock);
2321
2322 /* Just take the entire context list, if there are any */
2323 if (get_infop->nvmet_ctx_list_cnt) {
2324 list_splice_init(&get_infop->nvmet_ctx_list,
2325 &current_infop->nvmet_ctx_list);
2326 current_infop->nvmet_ctx_list_cnt =
2327 get_infop->nvmet_ctx_list_cnt - 1;
2328 get_infop->nvmet_ctx_list_cnt = 0;
2329 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2330
2331 current_infop->nvmet_ctx_start_cpu = get_infop;
2332 list_remove_head(&current_infop->nvmet_ctx_list,
2333 ctx_buf, struct lpfc_nvmet_ctxbuf,
2334 list);
2335 return ctx_buf;
2336 }
2337
2338 /* Otherwise, move on to the next CPU for this MRQ */
2339 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2340 get_infop = get_infop->nvmet_ctx_next_cpu;
2341 }
2342
2343 #endif
2344 /* Nothing found, all contexts for the MRQ are in-flight */
2345 return NULL;
2346 }
2347
2348 /**
2349 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2350 * @phba: pointer to lpfc hba data structure.
2351 * @idx: relative index of MRQ vector
2352 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2353 * @isr_timestamp: in jiffies.
2354 * @cqflag: cq processing information regarding workload.
2355 *
2356 * This routine processes an unsolicited NVME FCP command received on the
2357 * given MRQ. It obtains a free receive context for the MRQ (replenishing
2358 * the list from another CPU if necessary), initializes the exchange context
2359 * from the received frame, and hands the command to the nvmet-fc transport
2360 * either directly or via the deferred work queue, depending on CQ load. If
2361 * no context is available, the command is queued on the io wait list.
2362 **/
2363 static void
2364 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2365 uint32_t idx,
2366 struct rqb_dmabuf *nvmebuf,
2367 uint64_t isr_timestamp,
2368 uint8_t cqflag)
2369 {
2370 struct lpfc_async_xchg_ctx *ctxp;
2371 struct lpfc_nvmet_tgtport *tgtp;
2372 struct fc_frame_header *fc_hdr;
2373 struct lpfc_nvmet_ctxbuf *ctx_buf;
2374 struct lpfc_nvmet_ctx_info *current_infop;
2375 uint32_t size, oxid, sid, qno;
2376 unsigned long iflag;
2377 int current_cpu;
2378
2379 if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2380 return;
2381
2382 ctx_buf = NULL;
2383 if (!nvmebuf || !phba->targetport) {
2384 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2385 "6157 NVMET FCP Drop IO\n");
2386 if (nvmebuf)
2387 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2388 return;
2389 }
2390
2391 /*
2392 * Get a pointer to the context list for this MRQ based on
2393 * the CPU this MRQ IRQ is associated with. If the CPU association
2394 * changes from our initial assumption, the context list could
2395 * be empty, thus it would need to be replenished with the
2396 * context list from another CPU for this MRQ.
2397 */
2398 current_cpu = raw_smp_processor_id();
2399 current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2400 spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2401 if (current_infop->nvmet_ctx_list_cnt) {
2402 list_remove_head(&current_infop->nvmet_ctx_list,
2403 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2404 current_infop->nvmet_ctx_list_cnt--;
2405 } else {
2406 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2407 }
2408 spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2409
2410 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2411 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2412 size = nvmebuf->bytes_recv;
2413
2414 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2415 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2416 this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2417 if (idx != current_cpu)
2418 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2419 "6703 CPU Check rcv: "
2420 "cpu %d expect %d\n",
2421 current_cpu, idx);
2422 }
2423 #endif
2424
2425 lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
2426 oxid, size, raw_smp_processor_id());
2427
2428 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2429
2430 if (!ctx_buf) {
2431 /* Queue this NVME IO to process later */
2432 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2433 list_add_tail(&nvmebuf->hbuf.list,
2434 &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2435 phba->sli4_hba.nvmet_io_wait_cnt++;
2436 phba->sli4_hba.nvmet_io_wait_total++;
2437 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2438 iflag);
2439
2440 /* Post a brand new DMA buffer to RQ */
2441 qno = nvmebuf->idx;
2442 lpfc_post_rq_buffer(
2443 phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2444 phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2445
2446 atomic_inc(&tgtp->defer_ctx);
2447 return;
2448 }
2449
2450 sid = sli4_sid_from_fc_hdr(fc_hdr);
2451
2452 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
2453 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2454 list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2455 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2456 if (ctxp->state != LPFC_NVME_STE_FREE) {
2457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2458 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2459 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2460 }
2461 ctxp->wqeq = NULL;
2462 ctxp->offset = 0;
2463 ctxp->phba = phba;
2464 ctxp->size = size;
2465 ctxp->oxid = oxid;
2466 ctxp->sid = sid;
2467 ctxp->idx = idx;
2468 ctxp->state = LPFC_NVME_STE_RCV;
2469 ctxp->entry_cnt = 1;
2470 ctxp->flag = 0;
2471 ctxp->ctxbuf = ctx_buf;
2472 ctxp->rqb_buffer = (void *)nvmebuf;
2473 ctxp->hdwq = NULL;
2474 spin_lock_init(&ctxp->ctxlock);
2475
2476 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2477 if (isr_timestamp)
2478 ctxp->ts_isr_cmd = isr_timestamp;
2479 ctxp->ts_cmd_nvme = 0;
2480 ctxp->ts_nvme_data = 0;
2481 ctxp->ts_data_wqput = 0;
2482 ctxp->ts_isr_data = 0;
2483 ctxp->ts_data_nvme = 0;
2484 ctxp->ts_nvme_status = 0;
2485 ctxp->ts_status_wqput = 0;
2486 ctxp->ts_isr_status = 0;
2487 ctxp->ts_status_nvme = 0;
2488 #endif
2489
2490 atomic_inc(&tgtp->rcv_fcp_cmd_in);
2491 /* check for cq processing load */
2492 if (!cqflag) {
2493 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2494 return;
2495 }
2496
2497 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2498 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2500 "6325 Unable to queue work for oxid x%x. "
2501 "FCP Drop IO [x%x x%x x%x]\n",
2502 ctxp->oxid,
2503 atomic_read(&tgtp->rcv_fcp_cmd_in),
2504 atomic_read(&tgtp->rcv_fcp_cmd_out),
2505 atomic_read(&tgtp->xmt_fcp_release));
2506
2507 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2508 lpfc_nvmet_defer_release(phba, ctxp);
2509 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2510 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2511 }
2512 }
2513
2514 /**
2515 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2516 * @phba: pointer to lpfc hba data structure.
2517 * @idx: relative index of MRQ vector
2518 * @nvmebuf: pointer to received nvme data structure.
2519 * @isr_timestamp: in jiffies.
2520 * @cqflag: cq processing information regarding workload.
2521 *
2522 * This routine is used to process an unsolicited event received from a SLI
2523 * (Service Level Interface) ring. The actual processing of the data buffer
2524 * associated with the unsolicited event is done by invoking the routine
2525 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2526 * SLI RQ on which the unsolicited event was received.
2527 **/
2528 void
2529 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2530 uint32_t idx,
2531 struct rqb_dmabuf *nvmebuf,
2532 uint64_t isr_timestamp,
2533 uint8_t cqflag)
2534 {
2535 if (!nvmebuf) {
2536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2537 "3167 NVMET FCP Drop IO\n");
2538 return;
2539 }
2540 if (phba->nvmet_support == 0) {
2541 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2542 return;
2543 }
2544 lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2545 }
2546
2547 /**
2548 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2549 * @phba: pointer to a host N_Port data structure.
2550 * @ctxp: Context info for NVME LS Request
2551 * @rspbuf: DMA buffer of NVME command.
2552 * @rspsize: size of the NVME command.
2553 *
2554 * This routine allocates a lpfc-WQE data structure from the driver
2555 * lpfc-WQE free-list and prepares it as the XMIT_SEQUENCE WQE used to
2556 * transmit the NVME LS response described by @rspbuf and @rspsize. It
2557 * fills in the Buffer Descriptor Entry (BDE) for the response payload,
2558 * the RPI and XRI tags, the request tag and the received OX_ID, and the
2559 * remaining common WQE words (command, class, CQ id and length location),
2560 * so that the caller only needs to issue the prepared WQE. No additional
2561 * payload buffers are allocated; the response payload is expected to
2562 * already be present at @rspbuf. The reference count on the
2563 * ndlp is incremented by 1 and the reference to the ndlp is put into
2564 * context1 of the WQE data structure for this WQE to hold the ndlp
2565 * reference for the command's callback function to access later.
2566 * On failure, NULL is returned and any allocated iocbq is released.
2567 *
2568 * Return code
2569 * Pointer to the newly allocated/prepared nvme wqe data structure
2570 * NULL - when nvme wqe data structure allocation/preparation failed
2571 **/
2572 static struct lpfc_iocbq *
2573 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2574 struct lpfc_async_xchg_ctx *ctxp,
2575 dma_addr_t rspbuf, uint16_t rspsize)
2576 {
2577 struct lpfc_nodelist *ndlp;
2578 struct lpfc_iocbq *nvmewqe;
2579 union lpfc_wqe128 *wqe;
2580
2581 if (!lpfc_is_link_up(phba)) {
2582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2583 "6104 NVMET prep LS wqe: link err: "
2584 "NPORT x%x oxid:x%x ste %d\n",
2585 ctxp->sid, ctxp->oxid, ctxp->state);
2586 return NULL;
2587 }
2588
2589 /* Allocate buffer for command wqe */
2590 nvmewqe = lpfc_sli_get_iocbq(phba);
2591 if (nvmewqe == NULL) {
2592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2593 "6105 NVMET prep LS wqe: No WQE: "
2594 "NPORT x%x oxid x%x ste %d\n",
2595 ctxp->sid, ctxp->oxid, ctxp->state);
2596 return NULL;
2597 }
2598
2599 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2600 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2601 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2602 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2603 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2604 "6106 NVMET prep LS wqe: No ndlp: "
2605 "NPORT x%x oxid x%x ste %d\n",
2606 ctxp->sid, ctxp->oxid, ctxp->state);
2607 goto nvme_wqe_free_wqeq_exit;
2608 }
2609 ctxp->wqeq = nvmewqe;
2610
2611 /* prevent preparing wqe with NULL ndlp reference */
2612 nvmewqe->context1 = lpfc_nlp_get(ndlp);
2613 if (nvmewqe->context1 == NULL)
2614 goto nvme_wqe_free_wqeq_exit;
2615 nvmewqe->context2 = ctxp;
2616
2617 wqe = &nvmewqe->wqe;
2618 memset(wqe, 0, sizeof(union lpfc_wqe));
2619
2620 /* Words 0 - 2 */
2621 wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2622 wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2623 wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2624 wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2625
2626 /* Word 3 */
2627
2628 /* Word 4 */
2629
2630 /* Word 5 */
2631 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2632 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2633 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2634 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2635 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2636
2637 /* Word 6 */
2638 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2639 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2640 bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2641
2642 /* Word 7 */
2643 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2644 CMD_XMIT_SEQUENCE64_WQE);
2645 bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2646 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2647 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2648
2649 /* Word 8 */
2650 wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2651
2652 /* Word 9 */
2653 bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2654 /* Needs to be set by caller */
2655 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2656
2657 /* Word 10 */
2658 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2659 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2660 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2661 LPFC_WQE_LENLOC_WORD12);
2662 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2663
2664 /* Word 11 */
2665 bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2666 LPFC_WQE_CQ_ID_DEFAULT);
2667 bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2668 OTHER_COMMAND);
2669
2670 /* Word 12 */
2671 wqe->xmit_sequence.xmit_len = rspsize;
2672
2673 nvmewqe->retry = 1;
2674 nvmewqe->vport = phba->pport;
2675 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2676 nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2677
2678 /* Xmit NVMET response to remote NPORT <did> */
2679 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2680 "6039 Xmit NVMET LS response to remote "
2681 "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2682 ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2683 rspsize);
2684 return nvmewqe;
2685
2686 nvme_wqe_free_wqeq_exit:
2687 nvmewqe->context2 = NULL;
2688 nvmewqe->context3 = NULL;
2689 lpfc_sli_release_iocbq(phba, nvmewqe);
2690 return NULL;
2691 }
2692
2693
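/**
* lpfc_nvmet_prep_fcp_wqe - Prepare the WQE for an NVMET FCP operation
* @phba: pointer to lpfc hba data structure.
* @ctxp: exchange context for the FCP command being serviced.
*
* Builds a TSEND, TRECEIVE or TRSP WQE, as requested by the transport op,
* from the matching command template, and fills the SGL from the
* transport's scatterlist. Returns the prepared iocbq, or NULL if the link
* is down, the remote node is not logged in, the segment count exceeds the
* advertised limit, or the exchange is in the wrong state.
**/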
2694 static struct lpfc_iocbq *
2695 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2696 struct lpfc_async_xchg_ctx *ctxp)
2697 {
2698 struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
2699 struct lpfc_nvmet_tgtport *tgtp;
2700 struct sli4_sge *sgl;
2701 struct lpfc_nodelist *ndlp;
2702 struct lpfc_iocbq *nvmewqe;
2703 struct scatterlist *sgel;
2704 union lpfc_wqe128 *wqe;
2705 struct ulp_bde64 *bde;
2706 dma_addr_t physaddr;
2707 int i, cnt, nsegs;
2708 int do_pbde;
2709 int xc = 1;
2710
2711 if (!lpfc_is_link_up(phba)) {
2712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2713 "6107 NVMET prep FCP wqe: link err:"
2714 "NPORT x%x oxid x%x ste %d\n",
2715 ctxp->sid, ctxp->oxid, ctxp->state);
2716 return NULL;
2717 }
2718
2719 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2720 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2721 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2722 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2723 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2724 "6108 NVMET prep FCP wqe: no ndlp: "
2725 "NPORT x%x oxid x%x ste %d\n",
2726 ctxp->sid, ctxp->oxid, ctxp->state);
2727 return NULL;
2728 }
2729
2730 if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2732 "6109 NVMET prep FCP wqe: seg cnt err: "
2733 "NPORT x%x oxid x%x ste %d cnt %d\n",
2734 ctxp->sid, ctxp->oxid, ctxp->state,
2735 phba->cfg_nvme_seg_cnt);
2736 return NULL;
2737 }
2738 nsegs = rsp->sg_cnt;
2739
2740 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2741 nvmewqe = ctxp->wqeq;
2742 if (nvmewqe == NULL) {
2743 /* Allocate buffer for command wqe */
2744 nvmewqe = ctxp->ctxbuf->iocbq;
2745 if (nvmewqe == NULL) {
2746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2747 "6110 NVMET prep FCP wqe: No "
2748 "WQE: NPORT x%x oxid x%x ste %d\n",
2749 ctxp->sid, ctxp->oxid, ctxp->state);
2750 return NULL;
2751 }
2752 ctxp->wqeq = nvmewqe;
2753 xc = 0; /* create new XRI */
2754 nvmewqe->sli4_lxritag = NO_XRI;
2755 nvmewqe->sli4_xritag = NO_XRI;
2756 }
2757
2758 /* Sanity check */
2759 if (((ctxp->state == LPFC_NVME_STE_RCV) &&
2760 (ctxp->entry_cnt == 1)) ||
2761 (ctxp->state == LPFC_NVME_STE_DATA)) {
2762 wqe = &nvmewqe->wqe;
2763 } else {
2764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2765 "6111 Wrong state NVMET FCP: %d cnt %d\n",
2766 ctxp->state, ctxp->entry_cnt);
2767 return NULL;
2768 }
2769
2770 sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2771 switch (rsp->op) {
2772 case NVMET_FCOP_READDATA:
2773 case NVMET_FCOP_READDATA_RSP:
2774 /* From the tsend template, initialize words 7 - 11 */
2775 memcpy(&wqe->words[7],
2776 &lpfc_tsend_cmd_template.words[7],
2777 sizeof(uint32_t) * 5);
2778
2779 /* Words 0 - 2 : The first sg segment */
2780 sgel = &rsp->sg[0];
2781 physaddr = sg_dma_address(sgel);
2782 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2783 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2784 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2785 wqe->fcp_tsend.bde.addrHigh =
2786 cpu_to_le32(putPaddrHigh(physaddr));
2787
2788 /* Word 3 */
2789 wqe->fcp_tsend.payload_offset_len = 0;
2790
2791 /* Word 4 */
2792 wqe->fcp_tsend.relative_offset = ctxp->offset;
2793
2794 /* Word 5 */
2795 wqe->fcp_tsend.reserved = 0;
2796
2797 /* Word 6 */
2798 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2799 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2800 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2801 nvmewqe->sli4_xritag);
2802
2803 /* Word 7 - set ar later */
2804
2805 /* Word 8 */
2806 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2807
2808 /* Word 9 */
2809 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2810 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2811
2812 /* Word 10 - set wqes later, in template xc=1 */
2813 if (!xc)
2814 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2815
2816 /* Word 11 - set sup, irsp, irsplen later */
2817 do_pbde = 0;
2818
2819 /* Word 12 */
2820 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2821
2822 /* Setup 2 SKIP SGEs */
2823 sgl->addr_hi = 0;
2824 sgl->addr_lo = 0;
2825 sgl->word2 = 0;
2826 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2827 sgl->word2 = cpu_to_le32(sgl->word2);
2828 sgl->sge_len = 0;
2829 sgl++;
2830 sgl->addr_hi = 0;
2831 sgl->addr_lo = 0;
2832 sgl->word2 = 0;
2833 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2834 sgl->word2 = cpu_to_le32(sgl->word2);
2835 sgl->sge_len = 0;
2836 sgl++;
2837 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2838 atomic_inc(&tgtp->xmt_fcp_read_rsp);
2839
2840 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2841
2842 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2843 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2844 bf_set(wqe_sup,
2845 &wqe->fcp_tsend.wqe_com, 1);
2846 } else {
2847 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2848 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2849 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2850 ((rsp->rsplen >> 2) - 1));
2851 memcpy(&wqe->words[16], rsp->rspaddr,
2852 rsp->rsplen);
2853 }
2854 } else {
2855 atomic_inc(&tgtp->xmt_fcp_read);
2856
2857 /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2858 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2859 }
2860 break;
2861
2862 case NVMET_FCOP_WRITEDATA:
2863 /* From the treceive template, initialize words 3 - 11 */
2864 memcpy(&wqe->words[3],
2865 &lpfc_treceive_cmd_template.words[3],
2866 sizeof(uint32_t) * 9);
2867
2868 /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2869 wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2870 wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2871 wqe->fcp_treceive.bde.addrLow = 0;
2872 wqe->fcp_treceive.bde.addrHigh = 0;
2873
2874 /* Word 4 */
2875 wqe->fcp_treceive.relative_offset = ctxp->offset;
2876
2877 /* Word 6 */
2878 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2879 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2880 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2881 nvmewqe->sli4_xritag);
2882
2883 /* Word 7 */
2884
2885 /* Word 8 */
2886 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2887
2888 /* Word 9 */
2889 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2890 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2891
2892 /* Word 10 - in template xc=1 */
2893 if (!xc)
2894 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2895
2896 /* Word 11 - set pbde later */
2897 if (phba->cfg_enable_pbde) {
2898 do_pbde = 1;
2899 } else {
2900 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2901 do_pbde = 0;
2902 }
2903
2904 /* Word 12 */
2905 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2906
2907 /* Setup 2 SKIP SGEs */
2908 sgl->addr_hi = 0;
2909 sgl->addr_lo = 0;
2910 sgl->word2 = 0;
2911 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2912 sgl->word2 = cpu_to_le32(sgl->word2);
2913 sgl->sge_len = 0;
2914 sgl++;
2915 sgl->addr_hi = 0;
2916 sgl->addr_lo = 0;
2917 sgl->word2 = 0;
2918 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2919 sgl->word2 = cpu_to_le32(sgl->word2);
2920 sgl->sge_len = 0;
2921 sgl++;
2922 atomic_inc(&tgtp->xmt_fcp_write);
2923 break;
2924
2925 case NVMET_FCOP_RSP:
2926 /* From the treceive template, initialize words 4 - 11 */
2927 memcpy(&wqe->words[4],
2928 &lpfc_trsp_cmd_template.words[4],
2929 sizeof(uint32_t) * 8);
2930
2931 /* Words 0 - 2 */
2932 physaddr = rsp->rspdma;
2933 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2934 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2935 wqe->fcp_trsp.bde.addrLow =
2936 cpu_to_le32(putPaddrLow(physaddr));
2937 wqe->fcp_trsp.bde.addrHigh =
2938 cpu_to_le32(putPaddrHigh(physaddr));
2939
2940 /* Word 3 */
2941 wqe->fcp_trsp.response_len = rsp->rsplen;
2942
2943 /* Word 6 */
2944 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2945 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2946 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2947 nvmewqe->sli4_xritag);
2948
2949 /* Word 7 */
2950
2951 /* Word 8 */
2952 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2953
2954 /* Word 9 */
2955 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2956 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2957
2958 /* Word 10 */
2959 if (xc)
2960 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2961
2962 /* Word 11 */
2963 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2964 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2965 /* Bad response - embed it */
2966 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2967 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2968 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2969 ((rsp->rsplen >> 2) - 1));
2970 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2971 }
2972 do_pbde = 0;
2973
2974 /* Word 12 */
2975 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2976
2977 /* Use rspbuf, NOT sg list */
2978 nsegs = 0;
2979 sgl->word2 = 0;
2980 atomic_inc(&tgtp->xmt_fcp_rsp);
2981 break;
2982
2983 default:
2984 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2985 "6064 Unknown Rsp Op %d\n",
2986 rsp->op);
2987 return NULL;
2988 }
2989
2990 nvmewqe->retry = 1;
2991 nvmewqe->vport = phba->pport;
2992 nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2993 nvmewqe->context1 = ndlp;
2994
2995 for_each_sg(rsp->sg, sgel, nsegs, i) {
2996 physaddr = sg_dma_address(sgel);
2997 cnt = sg_dma_len(sgel);
2998 sgl->addr_hi = putPaddrHigh(physaddr);
2999 sgl->addr_lo = putPaddrLow(physaddr);
3000 sgl->word2 = 0;
3001 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3002 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
3003 if ((i+1) == rsp->sg_cnt)
3004 bf_set(lpfc_sli4_sge_last, sgl, 1);
3005 sgl->word2 = cpu_to_le32(sgl->word2);
3006 sgl->sge_len = cpu_to_le32(cnt);
3007 if (i == 0) {
3008 bde = (struct ulp_bde64 *)&wqe->words[13];
3009 if (do_pbde) {
3010 /* Words 13-15 (PBDE) */
3011 bde->addrLow = sgl->addr_lo;
3012 bde->addrHigh = sgl->addr_hi;
3013 bde->tus.f.bdeSize =
3014 le32_to_cpu(sgl->sge_len);
3015 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3016 bde->tus.w = cpu_to_le32(bde->tus.w);
3017 } else {
3018 memset(bde, 0, sizeof(struct ulp_bde64));
3019 }
3020 }
3021 sgl++;
3022 ctxp->offset += cnt;
3023 }
3024 ctxp->state = LPFC_NVME_STE_DATA;
3025 ctxp->entry_cnt++;
3026 return nvmewqe;
3027 }
3028
3029 /**
3030 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
3031 * @phba: Pointer to HBA context object.
3032 * @cmdwqe: Pointer to driver command WQE object.
3033 * @wcqe: Pointer to driver response CQE object.
3034 *
3035 * The function is called from SLI ring event handler with no
3036 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
3037 * The function frees memory resources used for the NVME commands.
3038 **/
3039 static void
3040 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3041 struct lpfc_wcqe_complete *wcqe)
3042 {
3043 struct lpfc_async_xchg_ctx *ctxp;
3044 struct lpfc_nvmet_tgtport *tgtp;
3045 uint32_t result;
3046 unsigned long flags;
3047 bool released = false;
3048
3049 ctxp = cmdwqe->context2;
3050 result = wcqe->parameter;
3051
3052 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3053 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3054 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3055
3056 spin_lock_irqsave(&ctxp->ctxlock, flags);
3057 ctxp->state = LPFC_NVME_STE_DONE;
3058
3059 /* Check if we already received a free context call
3060 * and we have completed processing an abort situation.
3061 */
3062 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3063 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3064 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3065 list_del_init(&ctxp->list);
3066 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3067 released = true;
3068 }
3069 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3070 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3071 atomic_inc(&tgtp->xmt_abort_rsp);
3072
3073 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3074 "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
3075 "WCQE: %08x %08x %08x %08x\n",
3076 ctxp->oxid, ctxp->flag, released,
3077 wcqe->word0, wcqe->total_data_placed,
3078 result, wcqe->word3);
3079
3080 cmdwqe->context2 = NULL;
3081 cmdwqe->context3 = NULL;
3082 /*
3083 * if transport has released ctx, then can reuse it. Otherwise,
3084 * will be recycled by transport release call.
3085 */
3086 if (released)
3087 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3088
3089 /* This is the iocbq for the abort, not the command */
3090 lpfc_sli_release_iocbq(phba, cmdwqe);
3091
3092 /* Since iaab/iaar are NOT set, there is no work left.
3093 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3094 * should have been called already.
3095 */
3096 }
3097
3098 /**
3099 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3100 * @phba: Pointer to HBA context object.
3101 * @cmdwqe: Pointer to driver command WQE object.
3102 * @wcqe: Pointer to driver response CQE object.
3103 *
3104 * The function is called from SLI ring event handler with no
3105 * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
3106 * The function frees memory resources used for the NVME commands.
3107 **/
3108 static void
3109 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3110 struct lpfc_wcqe_complete *wcqe)
3111 {
3112 struct lpfc_async_xchg_ctx *ctxp;
3113 struct lpfc_nvmet_tgtport *tgtp;
3114 unsigned long flags;
3115 uint32_t result;
3116 bool released = false;
3117
3118 ctxp = cmdwqe->context2;
3119 result = wcqe->parameter;
3120
3121 if (!ctxp) {
3122 /* if context is clear, related io already completed */
3123 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3124 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3125 wcqe->word0, wcqe->total_data_placed,
3126 result, wcqe->word3);
3127 return;
3128 }
3129
3130 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3131 spin_lock_irqsave(&ctxp->ctxlock, flags);
3132 if (ctxp->flag & LPFC_NVME_ABORT_OP)
3133 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3134
3135 /* Sanity check */
3136 if (ctxp->state != LPFC_NVME_STE_ABORT) {
3137 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3138 "6112 ABTS Wrong state:%d oxid x%x\n",
3139 ctxp->state, ctxp->oxid);
3140 }
3141
3142 /* Check if we already received a free context call
3143 * and we have completed processing an abort situation.
3144 */
3145 ctxp->state = LPFC_NVME_STE_DONE;
3146 if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
3147 !(ctxp->flag & LPFC_NVME_XBUSY)) {
3148 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3149 list_del_init(&ctxp->list);
3150 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3151 released = true;
3152 }
3153 ctxp->flag &= ~LPFC_NVME_ABORT_OP;
3154 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3155 atomic_inc(&tgtp->xmt_abort_rsp);
3156
3157 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3158 "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3159 "WCQE: %08x %08x %08x %08x\n",
3160 ctxp->oxid, ctxp->flag, released,
3161 wcqe->word0, wcqe->total_data_placed,
3162 result, wcqe->word3);
3163
3164 cmdwqe->context2 = NULL;
3165 cmdwqe->context3 = NULL;
3166 /*
3167 * if transport has released ctx, then can reuse it. Otherwise,
3168 * will be recycled by transport release call.
3169 */
3170 if (released)
3171 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3172
3173 /* Since iaab/iaar are NOT set, there is no work left.
3174 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
3175 * should have been called already.
3176 */
3177 }
3178
3179 /**
3180 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3181 * @phba: Pointer to HBA context object.
3182 * @cmdwqe: Pointer to driver command WQE object.
3183 * @wcqe: Pointer to driver response CQE object.
3184 *
3185 * The function is called from SLI ring event handler with no
3186 * lock held. This function is the completion handler for NVME ABTS for LS cmds.
3187 * The function frees memory resources used for the NVME commands.
3188 **/
3189 static void
3190 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3191 struct lpfc_wcqe_complete *wcqe)
3192 {
3193 struct lpfc_async_xchg_ctx *ctxp;
3194 struct lpfc_nvmet_tgtport *tgtp;
3195 uint32_t result;
3196
3197 ctxp = cmdwqe->context2;
3198 result = wcqe->parameter;
3199
3200 if (phba->nvmet_support) {
3201 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3202 atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3203 }
3204
3205 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3206 "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3207 ctxp, wcqe->word0, wcqe->total_data_placed,
3208 result, wcqe->word3);
3209
3210 if (!ctxp) {
3211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3212 "6415 NVMET LS Abort No ctx: WCQE: "
3213 "%08x %08x %08x %08x\n",
3214 wcqe->word0, wcqe->total_data_placed,
3215 result, wcqe->word3);
3216
3217 lpfc_sli_release_iocbq(phba, cmdwqe);
3218 return;
3219 }
3220
3221 if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
3222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3223 "6416 NVMET LS abort cmpl state mismatch: "
3224 "oxid x%x: %d %d\n",
3225 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3226 }
3227
3228 cmdwqe->context2 = NULL;
3229 cmdwqe->context3 = NULL;
3230 lpfc_sli_release_iocbq(phba, cmdwqe);
3231 kfree(ctxp);
3232 }
3233
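/**
* lpfc_nvmet_unsol_issue_abort - Build an ABTS for an unsolicited exchange
* @phba: pointer to lpfc hba data structure.
* @ctxp: exchange context associated with the ABTS.
* @sid: source ID of the remote port.
* @xri: exchange identifier to be aborted.
*
* Prepares an XMIT_SEQUENCE WQE carrying a BA_ABTS for the given exchange.
* Returns 1 when the WQE has been set up, or 0 when the remote node is not
* in a usable state and the ABTS is dropped.
**/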
3234 static int
3235 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3236 struct lpfc_async_xchg_ctx *ctxp,
3237 uint32_t sid, uint16_t xri)
3238 {
3239 struct lpfc_nvmet_tgtport *tgtp = NULL;
3240 struct lpfc_iocbq *abts_wqeq;
3241 union lpfc_wqe128 *wqe_abts;
3242 struct lpfc_nodelist *ndlp;
3243
3244 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3245 "6067 ABTS: sid %x xri x%x/x%x\n",
3246 sid, xri, ctxp->wqeq->sli4_xritag);
3247
3248 if (phba->nvmet_support && phba->targetport)
3249 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3250
3251 ndlp = lpfc_findnode_did(phba->pport, sid);
3252 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3253 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3254 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3255 if (tgtp)
3256 atomic_inc(&tgtp->xmt_abort_rsp_error);
3257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3258 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3259 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3260
3261 /* No failure to an ABTS request. */
3262 return 0;
3263 }
3264
3265 abts_wqeq = ctxp->wqeq;
3266 wqe_abts = &abts_wqeq->wqe;
3267
3268 /*
3269 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3270 * that were initialized in lpfc_sli4_nvmet_alloc.
3271 */
3272 memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3273
3274 /* Word 5 */
3275 bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3276 bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3277 bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3278 bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3279 bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3280
3281 /* Word 6 */
3282 bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3283 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3284 bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3285 abts_wqeq->sli4_xritag);
3286
3287 /* Word 7 */
3288 bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3289 CMD_XMIT_SEQUENCE64_WQE);
3290 bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3291 bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3292 bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3293
3294 /* Word 8 */
3295 wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3296
3297 /* Word 9 */
3298 bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3299 /* Needs to be set by caller */
3300 bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3301
3302 /* Word 10 */
3303 bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3304 bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3305 LPFC_WQE_LENLOC_WORD12);
3306 bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3307 bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3308
3309 /* Word 11 */
3310 bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3311 LPFC_WQE_CQ_ID_DEFAULT);
3312 bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3313 OTHER_COMMAND);
3314
3315 abts_wqeq->vport = phba->pport;
3316 abts_wqeq->context1 = ndlp;
3317 abts_wqeq->context2 = ctxp;
3318 abts_wqeq->context3 = NULL;
3319 abts_wqeq->rsvd2 = 0;
3320 /* hba_wqidx should already be set up from the command we are aborting */
3321 abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3322 abts_wqeq->iocb.ulpLe = 1;
3323
3324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3325 "6069 Issue ABTS to xri x%x reqtag x%x\n",
3326 xri, abts_wqeq->iotag);
3327 return 1;
3328 }
3329
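/**
* lpfc_nvmet_sol_fcp_issue_abort - Abort an in-flight (solicited) FCP exchange
* @phba: pointer to lpfc hba data structure.
* @ctxp: exchange context for the IO being aborted.
* @sid: source ID of the remote port.
* @xri: exchange identifier to be aborted.
*
* Allocates an abort iocbq and prepares an abort WQE against the
* outstanding WQE's XRI, using the same hardware queue index as the command
* being aborted. If the abort cannot be issued (node not ready, no iocbq
* available, HBA flushing IOs, or an abort already pending), the
* LPFC_NVME_ABORT_OP flag is cleared and 0 is returned.
**/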
3330 static int
3331 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3332 struct lpfc_async_xchg_ctx *ctxp,
3333 uint32_t sid, uint16_t xri)
3334 {
3335 struct lpfc_nvmet_tgtport *tgtp;
3336 struct lpfc_iocbq *abts_wqeq;
3337 struct lpfc_nodelist *ndlp;
3338 unsigned long flags;
3339 u8 opt;
3340 int rc;
3341
3342 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
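	/*
	 * If no WQE has been bound to this exchange yet, fall back to the
	 * iocbq embedded in the receive context buffer.
	 */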
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
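	/*
	 * If an ABTS has already been received from the initiator for this
	 * exchange, ask the firmware to clean up the exchange locally rather
	 * than transmitting another ABTS on the wire.
	 */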
	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
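	/*
	 * Bind a hardware queue to the context if one is not already set so
	 * the ABORT WQE is issued on the same hdwq as the original command.
	 */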
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

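	/*
	 * Do not abort an exchange whose context has already been freed;
	 * fail the request instead of touching a stale exchange.
	 */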
	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		return 0;
	}

aerr:
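	/*
	 * Error path: clear the abort flags and, if this context was already
	 * marked for release, take it off the ABTS list and post the context
	 * buffer back to the free pool once the lock is dropped.
	 */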
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}

/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (other side's exchange id).
 *
 * Return: 0 if the ABTS was issued or the request was benignly dropped,
 * 1 if the ABTS WQE could not be issued.
 **/
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

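	/*
	 * An LS abort is only expected after the LS request was received
	 * (entry_cnt 1) or after its response was queued (entry_cnt 2).
	 * Log any other state but still proceed with the abort.
	 */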
	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}

/**
 * lpfc_nvmet_invalidate_host - invalidate the target's view of an NVME host
 *
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Need to get the nvmet_fc_target_port pointer here. */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}