1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <net/checksum.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_tcq.h>
38 #include <scsi/scsi_transport_fc.h>
39
40 #include "lpfc_version.h"
41 #include "lpfc_hw4.h"
42 #include "lpfc_hw.h"
43 #include "lpfc_sli.h"
44 #include "lpfc_sli4.h"
45 #include "lpfc_nl.h"
46 #include "lpfc_disc.h"
47 #include "lpfc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52
53 #define LPFC_RESET_WAIT 2
54 #define LPFC_ABORT_WAIT 2
55
56 static char *dif_op_str[] = {
57 "PROT_NORMAL",
58 "PROT_READ_INSERT",
59 "PROT_WRITE_STRIP",
60 "PROT_READ_STRIP",
61 "PROT_WRITE_INSERT",
62 "PROT_READ_PASS",
63 "PROT_WRITE_PASS",
64 };
65
66 struct scsi_dif_tuple {
67 __be16 guard_tag; /* Checksum */
68 __be16 app_tag; /* Opaque storage */
69 __be32 ref_tag; /* Target LBA or indirect LBA */
70 };
71
72 static struct lpfc_rport_data *
73 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
74 {
75 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
76
77 if (vport->phba->cfg_fof)
78 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
79 else
80 return (struct lpfc_rport_data *)sdev->hostdata;
81 }
82
83 static void
84 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
85 static void
86 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 static int
88 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
89
90 static inline unsigned
91 lpfc_cmd_blksize(struct scsi_cmnd *sc)
92 {
93 return sc->device->sector_size;
94 }
95
96 #define LPFC_CHECK_PROTECT_GUARD 1
97 #define LPFC_CHECK_PROTECT_REF 2
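/*
 * lpfc_cmd_protect - report whether guard/ref tag checking is requested
 * for this command. Note: this helper currently always returns 1, so
 * protection checking is unconditionally requested regardless of @flag.
 */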
98 static inline unsigned
99 lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
100 {
101 return 1;
102 }
103
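/*
 * lpfc_cmd_guard_csum - return 1 when the command carries protection data
 * and the SCSI host is configured for IP-checksum (DIX) guard tags;
 * return 0 for T10 CRC guarding or when no DIF is in use.
 */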
104 static inline unsigned
105 lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
106 {
107 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
108 return 0;
109 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
110 return 1;
111 return 0;
112 }
113
114 /**
115 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
116 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer.
118 *
119 * This function is called from the lpfc_prep_task_mgmt_cmd function to
120 * set the last bit in the response sge entry.
121 **/
122 static void
123 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
124 struct lpfc_io_buf *lpfc_cmd)
125 {
126 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
127 if (sgl) {
128 sgl += 1;
129 sgl->word2 = le32_to_cpu(sgl->word2);
130 bf_set(lpfc_sli4_sge_last, sgl, 1);
131 sgl->word2 = cpu_to_le32(sgl->word2);
132 }
133 }
134
135 /**
136 * lpfc_update_stats - Update statistical data for the command completion
137 * @vport: The virtual port on which this call is executing.
138 * @lpfc_cmd: lpfc scsi command object pointer.
139 *
140 * This function is called when there is a command completion and this
141 * function updates the statistical data for the command completion.
142 **/
143 static void
144 lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
145 {
146 struct lpfc_hba *phba = vport->phba;
147 struct lpfc_rport_data *rdata;
148 struct lpfc_nodelist *pnode;
149 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
150 unsigned long flags;
151 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
152 unsigned long latency;
153 int i;
154
155 if (!vport->stat_data_enabled ||
156 vport->stat_data_blocked ||
157 (cmd->result))
158 return;
159
160 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
161 rdata = lpfc_cmd->rdata;
162 pnode = rdata->pnode;
163
164 spin_lock_irqsave(shost->host_lock, flags);
165 if (!pnode ||
166 !pnode->lat_data ||
167 (phba->bucket_type == LPFC_NO_BUCKET)) {
168 spin_unlock_irqrestore(shost->host_lock, flags);
169 return;
170 }
171
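/*
 * Select the latency bucket: linear bucketing indexes in fixed
 * bucket_step increments above bucket_base; otherwise the bucket
 * boundaries grow as powers of two of bucket_step.
 */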
172 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
173 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
174 phba->bucket_step;
175 /* check array subscript bounds */
176 if (i < 0)
177 i = 0;
178 else if (i >= LPFC_MAX_BUCKET_COUNT)
179 i = LPFC_MAX_BUCKET_COUNT - 1;
180 } else {
181 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
182 if (latency <= (phba->bucket_base +
183 ((1<<i)*phba->bucket_step)))
184 break;
185 }
186
187 pnode->lat_data[i].cmd_count++;
188 spin_unlock_irqrestore(shost->host_lock, flags);
189 }
190
191 /**
192 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
193 * @phba: The Hba for which this call is being executed.
194 *
195 * This routine is called when there is a resource error in the driver or
196 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at
197 * most one event each second, and wakes up the worker thread of @phba to
198 * process the event.
199 *
200 * This routine should be called with no lock held.
201 **/
202 void
203 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
204 {
205 unsigned long flags;
206 uint32_t evt_posted;
207 unsigned long expires;
208
209 spin_lock_irqsave(&phba->hbalock, flags);
210 atomic_inc(&phba->num_rsrc_err);
211 phba->last_rsrc_error_time = jiffies;
212
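/* Post at most one ramp-down event per QUEUE_RAMP_DOWN_INTERVAL. */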
213 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
214 if (time_after(expires, jiffies)) {
215 spin_unlock_irqrestore(&phba->hbalock, flags);
216 return;
217 }
218
219 phba->last_ramp_down_time = jiffies;
220
221 spin_unlock_irqrestore(&phba->hbalock, flags);
222
223 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
224 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
225 if (!evt_posted)
226 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
227 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
228
229 if (!evt_posted)
230 lpfc_worker_wake_up(phba);
231 return;
232 }
233
234 /**
235 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
236 * @phba: The Hba for which this call is being executed.
237 *
238 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
239 * worker thread. It reduces the queue depth for every scsi device on each
240 * vport associated with @phba.
241 **/
242 void
243 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
244 {
245 struct lpfc_vport **vports;
246 struct Scsi_Host *shost;
247 struct scsi_device *sdev;
248 unsigned long new_queue_depth;
249 unsigned long num_rsrc_err, num_cmd_success;
250 int i;
251
252 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253 num_cmd_success = atomic_read(&phba->num_cmd_success);
254
255 /*
256 * The error and success command counters are global per
257 * driver instance. If another handler has already
258 * operated on this error event, just exit.
259 */
260 if (num_rsrc_err == 0)
261 return;
262
263 vports = lpfc_create_vport_work_array(phba);
264 if (vports != NULL)
265 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
266 shost = lpfc_shost_from_vport(vports[i]);
267 shost_for_each_device(sdev, shost) {
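/*
 * Reduce the queue depth in proportion to the resource-error
 * fraction; if the proportional reduction rounds to zero, still
 * drop the depth by one.
 */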
268 new_queue_depth =
269 sdev->queue_depth * num_rsrc_err /
270 (num_rsrc_err + num_cmd_success);
271 if (!new_queue_depth)
272 new_queue_depth = sdev->queue_depth - 1;
273 else
274 new_queue_depth = sdev->queue_depth -
275 new_queue_depth;
276 scsi_change_queue_depth(sdev, new_queue_depth);
277 }
278 }
279 lpfc_destroy_vport_work_array(phba, vports);
280 atomic_set(&phba->num_rsrc_err, 0);
281 atomic_set(&phba->num_cmd_success, 0);
282 }
283
284 /**
285 * lpfc_scsi_dev_block - set all scsi hosts to block state
286 * @phba: Pointer to HBA context object.
287 *
288 * This function walks the vport list and sets each SCSI host to the block
289 * state by invoking the fc_remote_port_delete() routine. It is invoked
290 * from EEH handling when the device's PCI slot has been permanently disabled.
291 **/
292 void
293 lpfc_scsi_dev_block(struct lpfc_hba *phba)
294 {
295 struct lpfc_vport **vports;
296 struct Scsi_Host *shost;
297 struct scsi_device *sdev;
298 struct fc_rport *rport;
299 int i;
300
301 vports = lpfc_create_vport_work_array(phba);
302 if (vports != NULL)
303 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
304 shost = lpfc_shost_from_vport(vports[i]);
305 shost_for_each_device(sdev, shost) {
306 rport = starget_to_rport(scsi_target(sdev));
307 fc_remote_port_delete(rport);
308 }
309 }
310 lpfc_destroy_vport_work_array(phba, vports);
311 }
312
313 /**
314 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
315 * @vport: The virtual port for which this call being executed.
316 * @num_to_alloc: The requested number of buffers to allocate.
317 *
318 * This routine allocates a scsi buffer for a device with the SLI-3 interface
319 * spec. The scsi buffer contains all the necessary information needed to initiate
320 * a SCSI I/O. The non-DMAable buffer region contains information to build
321 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
322 * and the initial BPL. In addition to allocating memory, the FCP CMND and
323 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
324 *
325 * Return codes:
326 * int - number of scsi buffers that were allocated.
327 * 0 = failure, less than num_to_alloc is a partial failure.
328 **/
329 static int
330 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
331 {
332 struct lpfc_hba *phba = vport->phba;
333 struct lpfc_io_buf *psb;
334 struct ulp_bde64 *bpl;
335 IOCB_t *iocb;
336 dma_addr_t pdma_phys_fcp_cmd;
337 dma_addr_t pdma_phys_fcp_rsp;
338 dma_addr_t pdma_phys_sgl;
339 uint16_t iotag;
340 int bcnt, bpl_size;
341
342 bpl_size = phba->cfg_sg_dma_buf_size -
343 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
344
345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
346 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
347 num_to_alloc, phba->cfg_sg_dma_buf_size,
348 (int)sizeof(struct fcp_cmnd),
349 (int)sizeof(struct fcp_rsp), bpl_size);
350
351 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
352 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
353 if (!psb)
354 break;
355
356 /*
357 * Get memory from the pci pool to map the virt space to pci
358 * bus space for an I/O. The DMA buffer includes space for the
359 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
360 * necessary to support the sg_tablesize.
361 */
362 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
363 GFP_KERNEL, &psb->dma_handle);
364 if (!psb->data) {
365 kfree(psb);
366 break;
367 }
368
369
370 /* Allocate iotag for psb->cur_iocbq. */
371 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
372 if (iotag == 0) {
373 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
374 psb->data, psb->dma_handle);
375 kfree(psb);
376 break;
377 }
378 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
379
380 psb->fcp_cmnd = psb->data;
381 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
382 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
383 sizeof(struct fcp_rsp);
384
385 /* Initialize local short-hand pointers. */
386 bpl = (struct ulp_bde64 *)psb->dma_sgl;
387 pdma_phys_fcp_cmd = psb->dma_handle;
388 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
389 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
390 sizeof(struct fcp_rsp);
391
392 /*
393 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
394 * are sg list bdes. Initialize the first two and leave the
395 * rest for queuecommand.
396 */
397 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
398 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
399 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
400 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
401 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
402
403 /* Setup the physical region for the FCP RSP */
404 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
405 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
406 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
407 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
408 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
409
410 /*
411 * Since the IOCB for the FCP I/O is built into this
412 * lpfc_scsi_buf, initialize it with all known data now.
413 */
414 iocb = &psb->cur_iocbq.iocb;
415 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
416 if ((phba->sli_rev == 3) &&
417 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
418 /* fill in immediate fcp command BDE */
419 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
420 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
421 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
422 unsli3.fcp_ext.icd);
423 iocb->un.fcpi64.bdl.addrHigh = 0;
424 iocb->ulpBdeCount = 0;
425 iocb->ulpLe = 0;
426 /* fill in response BDE */
427 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
428 BUFF_TYPE_BDE_64;
429 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
430 sizeof(struct fcp_rsp);
431 iocb->unsli3.fcp_ext.rbde.addrLow =
432 putPaddrLow(pdma_phys_fcp_rsp);
433 iocb->unsli3.fcp_ext.rbde.addrHigh =
434 putPaddrHigh(pdma_phys_fcp_rsp);
435 } else {
436 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
437 iocb->un.fcpi64.bdl.bdeSize =
438 (2 * sizeof(struct ulp_bde64));
439 iocb->un.fcpi64.bdl.addrLow =
440 putPaddrLow(pdma_phys_sgl);
441 iocb->un.fcpi64.bdl.addrHigh =
442 putPaddrHigh(pdma_phys_sgl);
443 iocb->ulpBdeCount = 1;
444 iocb->ulpLe = 1;
445 }
446 iocb->ulpClass = CLASS3;
447 psb->status = IOSTAT_SUCCESS;
448 /* Put it back into the SCSI buffer list */
449 psb->cur_iocbq.context1 = psb;
450 spin_lock_init(&psb->buf_lock);
451 lpfc_release_scsi_buf_s3(phba, psb);
452
453 }
454
455 return bcnt;
456 }
457
458 /**
459 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
460 * @vport: pointer to lpfc vport data structure.
461 *
462 * This routine is invoked by the vport cleanup for deletions and the cleanup
463 * for an ndlp on removal.
464 **/
465 void
466 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
467 {
468 struct lpfc_hba *phba = vport->phba;
469 struct lpfc_io_buf *psb, *next_psb;
470 struct lpfc_sli4_hdw_queue *qp;
471 unsigned long iflag = 0;
472 int idx;
473
474 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
475 return;
476
477 spin_lock_irqsave(&phba->hbalock, iflag);
478 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
479 qp = &phba->sli4_hba.hdwq[idx];
480
481 spin_lock(&qp->abts_io_buf_list_lock);
482 list_for_each_entry_safe(psb, next_psb,
483 &qp->lpfc_abts_io_buf_list, list) {
484 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
485 continue;
486
487 if (psb->rdata && psb->rdata->pnode &&
488 psb->rdata->pnode->vport == vport)
489 psb->rdata = NULL;
490 }
491 spin_unlock(&qp->abts_io_buf_list_lock);
492 }
493 spin_unlock_irqrestore(&phba->hbalock, iflag);
494 }
495
496 /**
497 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
498 * @phba: pointer to lpfc hba data structure.
499 * @axri: pointer to the fcp xri abort wcqe structure.
500 *
501 * This routine is invoked by the worker thread to process a SLI4 fast-path
502 * FCP or NVME aborted xri.
503 **/
504 void
505 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
506 struct sli4_wcqe_xri_aborted *axri, int idx)
507 {
508 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
509 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
510 struct lpfc_io_buf *psb, *next_psb;
511 struct lpfc_sli4_hdw_queue *qp;
512 unsigned long iflag = 0;
513 struct lpfc_iocbq *iocbq;
514 int i;
515 struct lpfc_nodelist *ndlp;
516 int rrq_empty = 0;
517 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
518
519 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
520 return;
521
522 qp = &phba->sli4_hba.hdwq[idx];
523 spin_lock_irqsave(&phba->hbalock, iflag);
524 spin_lock(&qp->abts_io_buf_list_lock);
525 list_for_each_entry_safe(psb, next_psb,
526 &qp->lpfc_abts_io_buf_list, list) {
527 if (psb->cur_iocbq.sli4_xritag == xri) {
528 list_del_init(&psb->list);
529 psb->flags &= ~LPFC_SBUF_XBUSY;
530 psb->status = IOSTAT_SUCCESS;
531 if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
532 qp->abts_nvme_io_bufs--;
533 spin_unlock(&qp->abts_io_buf_list_lock);
534 spin_unlock_irqrestore(&phba->hbalock, iflag);
535 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
536 return;
537 }
538 qp->abts_scsi_io_bufs--;
539 spin_unlock(&qp->abts_io_buf_list_lock);
540
541 if (psb->rdata && psb->rdata->pnode)
542 ndlp = psb->rdata->pnode;
543 else
544 ndlp = NULL;
545
546 rrq_empty = list_empty(&phba->active_rrq_list);
547 spin_unlock_irqrestore(&phba->hbalock, iflag);
548 if (ndlp) {
549 lpfc_set_rrq_active(phba, ndlp,
550 psb->cur_iocbq.sli4_lxritag, rxid, 1);
551 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
552 }
553 lpfc_release_scsi_buf_s4(phba, psb);
554 if (rrq_empty)
555 lpfc_worker_wake_up(phba);
556 return;
557 }
558 }
559 spin_unlock(&qp->abts_io_buf_list_lock);
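/*
 * The XRI was not found on the aborted I/O buffer list. Fall back to
 * searching the active iotag table for a matching FCP iocb and clear
 * its exchange-busy flag.
 */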
560 for (i = 1; i <= phba->sli.last_iotag; i++) {
561 iocbq = phba->sli.iocbq_lookup[i];
562
563 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
564 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
565 continue;
566 if (iocbq->sli4_xritag != xri)
567 continue;
568 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
569 psb->flags &= ~LPFC_SBUF_XBUSY;
570 spin_unlock_irqrestore(&phba->hbalock, iflag);
571 if (!list_empty(&pring->txq))
572 lpfc_worker_wake_up(phba);
573 return;
574
575 }
576 spin_unlock_irqrestore(&phba->hbalock, iflag);
577 }
578
579 /**
580 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
581 * @phba: The HBA for which this call is being executed.
582 *
583 * This routine removes a scsi buffer from the head of the @phba
584 * lpfc_scsi_buf_list list and returns it to the caller.
585 *
586 * Return codes:
587 * NULL - Error
588 * Pointer to lpfc_scsi_buf - Success
589 **/
590 static struct lpfc_io_buf *
591 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
592 struct scsi_cmnd *cmnd)
593 {
594 struct lpfc_io_buf *lpfc_cmd = NULL;
595 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
596 unsigned long iflag = 0;
597
598 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
599 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
600 list);
601 if (!lpfc_cmd) {
602 spin_lock(&phba->scsi_buf_list_put_lock);
603 list_splice(&phba->lpfc_scsi_buf_list_put,
604 &phba->lpfc_scsi_buf_list_get);
605 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
606 list_remove_head(scsi_buf_list_get, lpfc_cmd,
607 struct lpfc_io_buf, list);
608 spin_unlock(&phba->scsi_buf_list_put_lock);
609 }
610 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
611
612 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
613 atomic_inc(&ndlp->cmd_pending);
614 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
615 }
616 return lpfc_cmd;
617 }
618 /**
619 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
620 * @phba: The HBA for which this call is being executed.
621 *
622 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
623 * and returns it to the caller.
624 *
625 * Return codes:
626 * NULL - Error
627 * Pointer to lpfc_scsi_buf - Success
628 **/
629 static struct lpfc_io_buf *
630 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
631 struct scsi_cmnd *cmnd)
632 {
633 struct lpfc_io_buf *lpfc_cmd;
634 struct lpfc_sli4_hdw_queue *qp;
635 struct sli4_sge *sgl;
636 IOCB_t *iocb;
637 dma_addr_t pdma_phys_fcp_rsp;
638 dma_addr_t pdma_phys_fcp_cmd;
639 uint32_t cpu, idx;
640 int tag;
641 struct fcp_cmd_rsp_buf *tmp = NULL;
642
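/*
 * Select the hardware queue: when I/O scheduling is by hardware queue,
 * derive the index from the block layer tag; otherwise use the hdwq
 * mapped to the current CPU.
 */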
643 cpu = raw_smp_processor_id();
644 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
645 tag = blk_mq_unique_tag(cmnd->request);
646 idx = blk_mq_unique_tag_to_hwq(tag);
647 } else {
648 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
649 }
650
651 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
652 !phba->cfg_xri_rebalancing);
653 if (!lpfc_cmd) {
654 qp = &phba->sli4_hba.hdwq[idx];
655 qp->empty_io_bufs++;
656 return NULL;
657 }
658
659 /* Setup key fields in buffer that may have been changed
660 * if other protocols used this buffer.
661 */
662 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
663 lpfc_cmd->prot_seg_cnt = 0;
664 lpfc_cmd->seg_cnt = 0;
665 lpfc_cmd->timeout = 0;
666 lpfc_cmd->flags = 0;
667 lpfc_cmd->start_time = jiffies;
668 lpfc_cmd->waitq = NULL;
669 lpfc_cmd->cpu = cpu;
670 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
671 lpfc_cmd->prot_data_type = 0;
672 #endif
673 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
674 if (!tmp) {
675 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
676 return NULL;
677 }
678
679 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
680 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
681
682 /*
683 * The first two SGEs are the FCP_CMD and FCP_RSP.
684 * The balance are sg list bdes. Initialize the
685 * first two and leave the rest for queuecommand.
686 */
687 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
688 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
689 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
690 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
691 sgl->word2 = le32_to_cpu(sgl->word2);
692 bf_set(lpfc_sli4_sge_last, sgl, 0);
693 sgl->word2 = cpu_to_le32(sgl->word2);
694 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
695 sgl++;
696
697 /* Setup the physical region for the FCP RSP */
698 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
699 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
700 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
701 sgl->word2 = le32_to_cpu(sgl->word2);
702 bf_set(lpfc_sli4_sge_last, sgl, 1);
703 sgl->word2 = cpu_to_le32(sgl->word2);
704 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
705
706 /*
707 * Since the IOCB for the FCP I/O is built into this
708 * lpfc_io_buf, initialize it with all known data now.
709 */
710 iocb = &lpfc_cmd->cur_iocbq.iocb;
711 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
712 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
713 /* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
714 * We are setting the bpl to point to our sgl. An sgl's
715 * entries are 16 bytes, a bpl's entries are 12 bytes.
716 */
717 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
718 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
719 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
720 iocb->ulpBdeCount = 1;
721 iocb->ulpLe = 1;
722 iocb->ulpClass = CLASS3;
723
724 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
725 atomic_inc(&ndlp->cmd_pending);
726 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
727 }
728 return lpfc_cmd;
729 }
730 /**
731 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
732 * @phba: The HBA for which this call is being executed.
733 *
734 * This routine removes a scsi buffer from the head of the @phba
735 * lpfc_scsi_buf_list list and returns it to the caller.
736 *
737 * Return codes:
738 * NULL - Error
739 * Pointer to lpfc_scsi_buf - Success
740 **/
741 static struct lpfc_io_buf*
742 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
743 struct scsi_cmnd *cmnd)
744 {
745 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
746 }
747
748 /**
749 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
750 * @phba: The Hba for which this call is being executed.
751 * @psb: The scsi buffer which is being released.
752 *
753 * This routine releases @psb scsi buffer by adding it to tail of @phba
754 * lpfc_scsi_buf_list list.
755 **/
756 static void
757 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
758 {
759 unsigned long iflag = 0;
760
761 psb->seg_cnt = 0;
762 psb->prot_seg_cnt = 0;
763
764 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
765 psb->pCmd = NULL;
766 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
767 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
768 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
769 }
770
771 /**
772 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
773 * @phba: The Hba for which this call is being executed.
774 * @psb: The scsi buffer which is being released.
775 *
776 * This routine releases @psb scsi buffer by adding it to tail of @hdwq
777 * io_buf_list list. For SLI4, XRIs are tied to the scsi buffer
778 * and cannot be reused for at least RA_TOV amount of time if the exchange
779 * was aborted.
780 **/
781 static void
782 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
783 {
784 struct lpfc_sli4_hdw_queue *qp;
785 unsigned long iflag = 0;
786
787 psb->seg_cnt = 0;
788 psb->prot_seg_cnt = 0;
789
790 qp = psb->hdwq;
791 if (psb->flags & LPFC_SBUF_XBUSY) {
792 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
793 psb->pCmd = NULL;
794 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
795 qp->abts_scsi_io_bufs++;
796 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
797 } else {
798 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
799 }
800 }
801
802 /**
803 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
804 * @phba: The Hba for which this call is being executed.
805 * @psb: The scsi buffer which is being released.
806 *
807 * This routine releases @psb scsi buffer by adding it to tail of @phba
808 * lpfc_scsi_buf_list list.
809 **/
810 static void
811 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
812 {
813 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
814 atomic_dec(&psb->ndlp->cmd_pending);
815
816 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
817 phba->lpfc_release_scsi_buf(phba, psb);
818 }
819
820 /**
821 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
822 * @phba: The Hba for which this call is being executed.
823 * @lpfc_cmd: The scsi buffer which is going to be mapped.
824 *
825 * This routine does the pci dma mapping for the scatter-gather list of the
826 * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec.
827 * It scans through the sg elements and formats the bdes, and also initializes
828 * all IOCB fields which are dependent on the scsi command request buffer.
829 *
830 * Return codes:
831 * 1 - Error
832 * 0 - Success
833 **/
834 static int
835 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
836 {
837 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
838 struct scatterlist *sgel = NULL;
839 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
840 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
841 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
842 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
843 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
844 dma_addr_t physaddr;
845 uint32_t num_bde = 0;
846 int nseg, datadir = scsi_cmnd->sc_data_direction;
847
848 /*
849 * There are three possibilities here - use scatter-gather segment, use
850 * the single mapping, or neither. Start the lpfc command prep by
851 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
852 * data bde entry.
853 */
854 bpl += 2;
855 if (scsi_sg_count(scsi_cmnd)) {
856 /*
857 * The driver stores the segment count returned from dma_map_sg
858 * because this is a count of dma mappings used to map the use_sg
859 * pages. They are not guaranteed to be the same for those
860 * architectures that implement an IOMMU.
861 */
862
863 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
864 scsi_sg_count(scsi_cmnd), datadir);
865 if (unlikely(!nseg))
866 return 1;
867
868 lpfc_cmd->seg_cnt = nseg;
869 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
870 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
871 "9064 BLKGRD: %s: Too many sg segments"
872 " from dma_map_sg. Config %d, seg_cnt"
873 " %d\n", __func__, phba->cfg_sg_seg_cnt,
874 lpfc_cmd->seg_cnt);
875 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
876 lpfc_cmd->seg_cnt = 0;
877 scsi_dma_unmap(scsi_cmnd);
878 return 2;
879 }
880
881 /*
882 * The driver established a maximum scatter-gather segment count
883 * during probe that limits the number of sg elements in any
884 * single scsi command. Just run through the seg_cnt and format
885 * the bde's.
886 * When using SLI-3 the driver will try to fit all the BDEs into
887 * the IOCB. If it can't then the BDEs get added to a BPL as it
888 * does for SLI-2 mode.
889 */
890 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
891 physaddr = sg_dma_address(sgel);
892 if (phba->sli_rev == 3 &&
893 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
894 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
895 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
896 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
897 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
898 data_bde->addrLow = putPaddrLow(physaddr);
899 data_bde->addrHigh = putPaddrHigh(physaddr);
900 data_bde++;
901 } else {
902 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
903 bpl->tus.f.bdeSize = sg_dma_len(sgel);
904 bpl->tus.w = le32_to_cpu(bpl->tus.w);
905 bpl->addrLow =
906 le32_to_cpu(putPaddrLow(physaddr));
907 bpl->addrHigh =
908 le32_to_cpu(putPaddrHigh(physaddr));
909 bpl++;
910 }
911 }
912 }
913
914 /*
915 * Finish initializing those IOCB fields that are dependent on the
916 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
917 * explicitly reinitialized and for SLI-3 the extended bde count is
918 * explicitly reinitialized since all iocb memory resources are reused.
919 */
920 if (phba->sli_rev == 3 &&
921 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
922 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
923 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
924 /*
925 * The extended IOCB format can only fit 3 BDE or a BPL.
926 * This I/O has more than 3 BDE so the 1st data bde will
927 * be a BPL that is filled in here.
928 */
929 physaddr = lpfc_cmd->dma_handle;
930 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
931 data_bde->tus.f.bdeSize = (num_bde *
932 sizeof(struct ulp_bde64));
933 physaddr += (sizeof(struct fcp_cmnd) +
934 sizeof(struct fcp_rsp) +
935 (2 * sizeof(struct ulp_bde64)));
936 data_bde->addrHigh = putPaddrHigh(physaddr);
937 data_bde->addrLow = putPaddrLow(physaddr);
938 /* ebde count includes the response bde and data bpl */
939 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
940 } else {
941 /* ebde count includes the response bde and data bdes */
942 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
943 }
944 } else {
945 iocb_cmd->un.fcpi64.bdl.bdeSize =
946 ((num_bde + 2) * sizeof(struct ulp_bde64));
947 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
948 }
949 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
950
951 /*
952 * Due to difference in data length between DIF/non-DIF paths,
953 * we need to set word 4 of IOCB here
954 */
955 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
956 return 0;
957 }
958
959 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
960
961 /* Return BG_ERR_INIT if error injection is detected by Initiator */
962 #define BG_ERR_INIT 0x1
963 /* Return BG_ERR_TGT if error injection is detected by Target */
964 #define BG_ERR_TGT 0x2
965 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
966 #define BG_ERR_SWAP 0x10
967 /**
968 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
969 * error injection
970 **/
971 #define BG_ERR_CHECK 0x20
972
973 /**
974 * lpfc_bg_err_inject - Determine if we should inject an error
975 * @phba: The Hba for which this call is being executed.
976 * @sc: The SCSI command to examine
977 * @reftag: (out) BlockGuard reference tag for transmitted data
978 * @apptag: (out) BlockGuard application tag for transmitted data
979 * @new_guard: (in) Value to replace CRC with if needed
980 *
981 * Returns BG_ERR_* bit mask or 0 if request ignored
982 **/
983 static int
984 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
985 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
986 {
987 struct scatterlist *sgpe; /* s/g prot entry */
988 struct lpfc_io_buf *lpfc_cmd = NULL;
989 struct scsi_dif_tuple *src = NULL;
990 struct lpfc_nodelist *ndlp;
991 struct lpfc_rport_data *rdata;
992 uint32_t op = scsi_get_prot_op(sc);
993 uint32_t blksize;
994 uint32_t numblks;
995 sector_t lba;
996 int rc = 0;
997 int blockoff = 0;
998
999 if (op == SCSI_PROT_NORMAL)
1000 return 0;
1001
1002 sgpe = scsi_prot_sglist(sc);
1003 lba = scsi_get_lba(sc);
1004
1005 /* First check if we need to match the LBA */
1006 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1007 blksize = lpfc_cmd_blksize(sc);
1008 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1009
1010 /* Make sure we have the right LBA if one is specified */
1011 if ((phba->lpfc_injerr_lba < lba) ||
1012 (phba->lpfc_injerr_lba >= (lba + numblks)))
1013 return 0;
1014 if (sgpe) {
1015 blockoff = phba->lpfc_injerr_lba - lba;
1016 numblks = sg_dma_len(sgpe) /
1017 sizeof(struct scsi_dif_tuple);
1018 if (numblks < blockoff)
1019 blockoff = numblks;
1020 }
1021 }
1022
1023 /* Next check if we need to match the remote NPortID or WWPN */
1024 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1025 if (rdata && rdata->pnode) {
1026 ndlp = rdata->pnode;
1027
1028 /* Make sure we have the right NPortID if one is specified */
1029 if (phba->lpfc_injerr_nportid &&
1030 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1031 return 0;
1032
1033 /*
1034 * Make sure we have the right WWPN if one is specified.
1035 * wwn[0] should be a non-zero NAA in a good WWPN.
1036 */
1037 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1038 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1039 sizeof(struct lpfc_name)) != 0))
1040 return 0;
1041 }
1042
1043 /* Setup a ptr to the protection data if the SCSI host provides it */
1044 if (sgpe) {
1045 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1046 src += blockoff;
1047 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1048 }
1049
1050 /* Should we change the Reference Tag */
1051 if (reftag) {
1052 if (phba->lpfc_injerr_wref_cnt) {
1053 switch (op) {
1054 case SCSI_PROT_WRITE_PASS:
1055 if (src) {
1056 /*
1057 * For WRITE_PASS, force the error
1058 * to be sent on the wire. It should
1059 * be detected by the Target.
1060 * If blockoff != 0 error will be
1061 * inserted in middle of the IO.
1062 */
1063
1064 lpfc_printf_log(phba, KERN_ERR,
1065 LOG_TRACE_EVENT,
1066 "9076 BLKGRD: Injecting reftag error: "
1067 "write lba x%lx + x%x oldrefTag x%x\n",
1068 (unsigned long)lba, blockoff,
1069 be32_to_cpu(src->ref_tag));
1070
1071 /*
1072 * Save the old ref_tag so we can
1073 * restore it on completion.
1074 */
1075 if (lpfc_cmd) {
1076 lpfc_cmd->prot_data_type =
1077 LPFC_INJERR_REFTAG;
1078 lpfc_cmd->prot_data_segment =
1079 src;
1080 lpfc_cmd->prot_data =
1081 src->ref_tag;
1082 }
1083 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1084 phba->lpfc_injerr_wref_cnt--;
1085 if (phba->lpfc_injerr_wref_cnt == 0) {
1086 phba->lpfc_injerr_nportid = 0;
1087 phba->lpfc_injerr_lba =
1088 LPFC_INJERR_LBA_OFF;
1089 memset(&phba->lpfc_injerr_wwpn,
1090 0, sizeof(struct lpfc_name));
1091 }
1092 rc = BG_ERR_TGT | BG_ERR_CHECK;
1093
1094 break;
1095 }
1096 fallthrough;
1097 case SCSI_PROT_WRITE_INSERT:
1098 /*
1099 * For WRITE_INSERT, force the error
1100 * to be sent on the wire. It should be
1101 * detected by the Target.
1102 */
1103 /* DEADBEEF will be the reftag on the wire */
1104 *reftag = 0xDEADBEEF;
1105 phba->lpfc_injerr_wref_cnt--;
1106 if (phba->lpfc_injerr_wref_cnt == 0) {
1107 phba->lpfc_injerr_nportid = 0;
1108 phba->lpfc_injerr_lba =
1109 LPFC_INJERR_LBA_OFF;
1110 memset(&phba->lpfc_injerr_wwpn,
1111 0, sizeof(struct lpfc_name));
1112 }
1113 rc = BG_ERR_TGT | BG_ERR_CHECK;
1114
1115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1116 "9078 BLKGRD: Injecting reftag error: "
1117 "write lba x%lx\n", (unsigned long)lba);
1118 break;
1119 case SCSI_PROT_WRITE_STRIP:
1120 /*
1121 * For WRITE_STRIP and WRITE_PASS,
1122 * force the error on data
1123 * being copied from SLI-Host to SLI-Port.
1124 */
1125 *reftag = 0xDEADBEEF;
1126 phba->lpfc_injerr_wref_cnt--;
1127 if (phba->lpfc_injerr_wref_cnt == 0) {
1128 phba->lpfc_injerr_nportid = 0;
1129 phba->lpfc_injerr_lba =
1130 LPFC_INJERR_LBA_OFF;
1131 memset(&phba->lpfc_injerr_wwpn,
1132 0, sizeof(struct lpfc_name));
1133 }
1134 rc = BG_ERR_INIT;
1135
1136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1137 "9077 BLKGRD: Injecting reftag error: "
1138 "write lba x%lx\n", (unsigned long)lba);
1139 break;
1140 }
1141 }
1142 if (phba->lpfc_injerr_rref_cnt) {
1143 switch (op) {
1144 case SCSI_PROT_READ_INSERT:
1145 case SCSI_PROT_READ_STRIP:
1146 case SCSI_PROT_READ_PASS:
1147 /*
1148 * For READ_STRIP and READ_PASS, force the
1149 * error on data being read off the wire. It
1150 * should force an IO error to the driver.
1151 */
1152 *reftag = 0xDEADBEEF;
1153 phba->lpfc_injerr_rref_cnt--;
1154 if (phba->lpfc_injerr_rref_cnt == 0) {
1155 phba->lpfc_injerr_nportid = 0;
1156 phba->lpfc_injerr_lba =
1157 LPFC_INJERR_LBA_OFF;
1158 memset(&phba->lpfc_injerr_wwpn,
1159 0, sizeof(struct lpfc_name));
1160 }
1161 rc = BG_ERR_INIT;
1162
1163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1164 "9079 BLKGRD: Injecting reftag error: "
1165 "read lba x%lx\n", (unsigned long)lba);
1166 break;
1167 }
1168 }
1169 }
1170
1171 /* Should we change the Application Tag */
1172 if (apptag) {
1173 if (phba->lpfc_injerr_wapp_cnt) {
1174 switch (op) {
1175 case SCSI_PROT_WRITE_PASS:
1176 if (src) {
1177 /*
1178 * For WRITE_PASS, force the error
1179 * to be sent on the wire. It should
1180 * be detected by the Target.
1181 * If blockoff != 0 error will be
1182 * inserted in middle of the IO.
1183 */
1184
1185 lpfc_printf_log(phba, KERN_ERR,
1186 LOG_TRACE_EVENT,
1187 "9080 BLKGRD: Injecting apptag error: "
1188 "write lba x%lx + x%x oldappTag x%x\n",
1189 (unsigned long)lba, blockoff,
1190 be16_to_cpu(src->app_tag));
1191
1192 /*
1193 * Save the old app_tag so we can
1194 * restore it on completion.
1195 */
1196 if (lpfc_cmd) {
1197 lpfc_cmd->prot_data_type =
1198 LPFC_INJERR_APPTAG;
1199 lpfc_cmd->prot_data_segment =
1200 src;
1201 lpfc_cmd->prot_data =
1202 src->app_tag;
1203 }
1204 src->app_tag = cpu_to_be16(0xDEAD);
1205 phba->lpfc_injerr_wapp_cnt--;
1206 if (phba->lpfc_injerr_wapp_cnt == 0) {
1207 phba->lpfc_injerr_nportid = 0;
1208 phba->lpfc_injerr_lba =
1209 LPFC_INJERR_LBA_OFF;
1210 memset(&phba->lpfc_injerr_wwpn,
1211 0, sizeof(struct lpfc_name));
1212 }
1213 rc = BG_ERR_TGT | BG_ERR_CHECK;
1214 break;
1215 }
1216 fallthrough;
1217 case SCSI_PROT_WRITE_INSERT:
1218 /*
1219 * For WRITE_INSERT, force the
1220 * error to be sent on the wire. It should be
1221 * detected by the Target.
1222 */
1223 /* DEAD will be the apptag on the wire */
1224 *apptag = 0xDEAD;
1225 phba->lpfc_injerr_wapp_cnt--;
1226 if (phba->lpfc_injerr_wapp_cnt == 0) {
1227 phba->lpfc_injerr_nportid = 0;
1228 phba->lpfc_injerr_lba =
1229 LPFC_INJERR_LBA_OFF;
1230 memset(&phba->lpfc_injerr_wwpn,
1231 0, sizeof(struct lpfc_name));
1232 }
1233 rc = BG_ERR_TGT | BG_ERR_CHECK;
1234
1235 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1236 "0813 BLKGRD: Injecting apptag error: "
1237 "write lba x%lx\n", (unsigned long)lba);
1238 break;
1239 case SCSI_PROT_WRITE_STRIP:
1240 /*
1241 * For WRITE_STRIP and WRITE_PASS,
1242 * force the error on data
1243 * being copied from SLI-Host to SLI-Port.
1244 */
1245 *apptag = 0xDEAD;
1246 phba->lpfc_injerr_wapp_cnt--;
1247 if (phba->lpfc_injerr_wapp_cnt == 0) {
1248 phba->lpfc_injerr_nportid = 0;
1249 phba->lpfc_injerr_lba =
1250 LPFC_INJERR_LBA_OFF;
1251 memset(&phba->lpfc_injerr_wwpn,
1252 0, sizeof(struct lpfc_name));
1253 }
1254 rc = BG_ERR_INIT;
1255
1256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1257 "0812 BLKGRD: Injecting apptag error: "
1258 "write lba x%lx\n", (unsigned long)lba);
1259 break;
1260 }
1261 }
1262 if (phba->lpfc_injerr_rapp_cnt) {
1263 switch (op) {
1264 case SCSI_PROT_READ_INSERT:
1265 case SCSI_PROT_READ_STRIP:
1266 case SCSI_PROT_READ_PASS:
1267 /*
1268 * For READ_STRIP and READ_PASS, force the
1269 * error on data being read off the wire. It
1270 * should force an IO error to the driver.
1271 */
1272 *apptag = 0xDEAD;
1273 phba->lpfc_injerr_rapp_cnt--;
1274 if (phba->lpfc_injerr_rapp_cnt == 0) {
1275 phba->lpfc_injerr_nportid = 0;
1276 phba->lpfc_injerr_lba =
1277 LPFC_INJERR_LBA_OFF;
1278 memset(&phba->lpfc_injerr_wwpn,
1279 0, sizeof(struct lpfc_name));
1280 }
1281 rc = BG_ERR_INIT;
1282
1283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1284 "0814 BLKGRD: Injecting apptag error: "
1285 "read lba x%lx\n", (unsigned long)lba);
1286 break;
1287 }
1288 }
1289 }
1290
1291
1292 /* Should we change the Guard Tag */
1293 if (new_guard) {
1294 if (phba->lpfc_injerr_wgrd_cnt) {
1295 switch (op) {
1296 case SCSI_PROT_WRITE_PASS:
1297 rc = BG_ERR_CHECK;
1298 fallthrough;
1299
1300 case SCSI_PROT_WRITE_INSERT:
1301 /*
1302 * For WRITE_INSERT, force the
1303 * error to be sent on the wire. It should be
1304 * detected by the Target.
1305 */
1306 phba->lpfc_injerr_wgrd_cnt--;
1307 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1308 phba->lpfc_injerr_nportid = 0;
1309 phba->lpfc_injerr_lba =
1310 LPFC_INJERR_LBA_OFF;
1311 memset(&phba->lpfc_injerr_wwpn,
1312 0, sizeof(struct lpfc_name));
1313 }
1314
1315 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1316 /* Signals the caller to swap CRC->CSUM */
1317
1318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1319 "0817 BLKGRD: Injecting guard error: "
1320 "write lba x%lx\n", (unsigned long)lba);
1321 break;
1322 case SCSI_PROT_WRITE_STRIP:
1323 /*
1324 * For WRITE_STRIP and WRITE_PASS,
1325 * force the error on data
1326 * being copied from SLI-Host to SLI-Port.
1327 */
1328 phba->lpfc_injerr_wgrd_cnt--;
1329 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1330 phba->lpfc_injerr_nportid = 0;
1331 phba->lpfc_injerr_lba =
1332 LPFC_INJERR_LBA_OFF;
1333 memset(&phba->lpfc_injerr_wwpn,
1334 0, sizeof(struct lpfc_name));
1335 }
1336
1337 rc = BG_ERR_INIT | BG_ERR_SWAP;
1338 /* Signals the caller to swap CRC->CSUM */
1339
1340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1341 "0816 BLKGRD: Injecting guard error: "
1342 "write lba x%lx\n", (unsigned long)lba);
1343 break;
1344 }
1345 }
1346 if (phba->lpfc_injerr_rgrd_cnt) {
1347 switch (op) {
1348 case SCSI_PROT_READ_INSERT:
1349 case SCSI_PROT_READ_STRIP:
1350 case SCSI_PROT_READ_PASS:
1351 /*
1352 * For READ_STRIP and READ_PASS, force the
1353 * error on data being read off the wire. It
1354 * should force an IO error to the driver.
1355 */
1356 phba->lpfc_injerr_rgrd_cnt--;
1357 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1358 phba->lpfc_injerr_nportid = 0;
1359 phba->lpfc_injerr_lba =
1360 LPFC_INJERR_LBA_OFF;
1361 memset(&phba->lpfc_injerr_wwpn,
1362 0, sizeof(struct lpfc_name));
1363 }
1364
1365 rc = BG_ERR_INIT | BG_ERR_SWAP;
1366 /* Signals the caller to swap CRC->CSUM */
1367
1368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1369 "0818 BLKGRD: Injecting guard error: "
1370 "read lba x%lx\n", (unsigned long)lba);
1371 }
1372 }
1373 }
1374
1375 return rc;
1376 }
1377 #endif
1378
1379 /**
1380 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1381 * the specified SCSI command.
1382 * @phba: The Hba for which this call is being executed.
1383 * @sc: The SCSI command to examine
1384 * @txop: (out) BlockGuard operation for transmitted data
1385 * @rxop: (out) BlockGuard operation for received data
1386 *
1387 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1388 *
1389 **/
1390 static int
1391 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1392 uint8_t *txop, uint8_t *rxop)
1393 {
1394 uint8_t ret = 0;
1395
1396 if (lpfc_cmd_guard_csum(sc)) {
1397 switch (scsi_get_prot_op(sc)) {
1398 case SCSI_PROT_READ_INSERT:
1399 case SCSI_PROT_WRITE_STRIP:
1400 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1401 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1402 break;
1403
1404 case SCSI_PROT_READ_STRIP:
1405 case SCSI_PROT_WRITE_INSERT:
1406 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1407 *txop = BG_OP_IN_NODIF_OUT_CRC;
1408 break;
1409
1410 case SCSI_PROT_READ_PASS:
1411 case SCSI_PROT_WRITE_PASS:
1412 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1413 *txop = BG_OP_IN_CSUM_OUT_CRC;
1414 break;
1415
1416 case SCSI_PROT_NORMAL:
1417 default:
1418 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1419 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1420 scsi_get_prot_op(sc));
1421 ret = 1;
1422 break;
1423
1424 }
1425 } else {
1426 switch (scsi_get_prot_op(sc)) {
1427 case SCSI_PROT_READ_STRIP:
1428 case SCSI_PROT_WRITE_INSERT:
1429 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1430 *txop = BG_OP_IN_NODIF_OUT_CRC;
1431 break;
1432
1433 case SCSI_PROT_READ_PASS:
1434 case SCSI_PROT_WRITE_PASS:
1435 *rxop = BG_OP_IN_CRC_OUT_CRC;
1436 *txop = BG_OP_IN_CRC_OUT_CRC;
1437 break;
1438
1439 case SCSI_PROT_READ_INSERT:
1440 case SCSI_PROT_WRITE_STRIP:
1441 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1442 *txop = BG_OP_IN_CRC_OUT_NODIF;
1443 break;
1444
1445 case SCSI_PROT_NORMAL:
1446 default:
1447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1448 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1449 scsi_get_prot_op(sc));
1450 ret = 1;
1451 break;
1452 }
1453 }
1454
1455 return ret;
1456 }
1457
1458 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1459 /**
1460 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1461 * the specified SCSI command in order to force a guard tag error.
1462 * @phba: The Hba for which this call is being executed.
1463 * @sc: The SCSI command to examine
1464 * @txop: (out) BlockGuard operation for transmitted data
1465 * @rxop: (out) BlockGuard operation for received data
1466 *
1467 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1468 *
1469 **/
1470 static int
1471 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1472 uint8_t *txop, uint8_t *rxop)
1473 {
1474 uint8_t ret = 0;
1475
1476 if (lpfc_cmd_guard_csum(sc)) {
1477 switch (scsi_get_prot_op(sc)) {
1478 case SCSI_PROT_READ_INSERT:
1479 case SCSI_PROT_WRITE_STRIP:
1480 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1481 *txop = BG_OP_IN_CRC_OUT_NODIF;
1482 break;
1483
1484 case SCSI_PROT_READ_STRIP:
1485 case SCSI_PROT_WRITE_INSERT:
1486 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1487 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1488 break;
1489
1490 case SCSI_PROT_READ_PASS:
1491 case SCSI_PROT_WRITE_PASS:
1492 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1493 *txop = BG_OP_IN_CRC_OUT_CSUM;
1494 break;
1495
1496 case SCSI_PROT_NORMAL:
1497 default:
1498 break;
1499
1500 }
1501 } else {
1502 switch (scsi_get_prot_op(sc)) {
1503 case SCSI_PROT_READ_STRIP:
1504 case SCSI_PROT_WRITE_INSERT:
1505 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1506 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1507 break;
1508
1509 case SCSI_PROT_READ_PASS:
1510 case SCSI_PROT_WRITE_PASS:
1511 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1512 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1513 break;
1514
1515 case SCSI_PROT_READ_INSERT:
1516 case SCSI_PROT_WRITE_STRIP:
1517 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1518 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1519 break;
1520
1521 case SCSI_PROT_NORMAL:
1522 default:
1523 break;
1524 }
1525 }
1526
1527 return ret;
1528 }
1529 #endif
1530
1531 /**
1532 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1533 * @phba: The Hba for which this call is being executed.
1534 * @sc: pointer to scsi command we're working on
1535 * @bpl: pointer to buffer list for protection groups
1536 * @datasegcnt: number of segments of data that have been dma mapped
1537 *
1538 * This function sets up BPL buffer list for protection groups of
1539 * type LPFC_PG_TYPE_NO_DIF
1540 *
1541 * This is usually used when the HBA is instructed to generate
1542 * DIFs and insert them into data stream (or strip DIF from
1543 * incoming data stream)
1544 *
1545 * The buffer list consists of just one protection group described
1546 * below:
1547 * +-------------------------+
1548 * start of prot group --> | PDE_5 |
1549 * +-------------------------+
1550 * | PDE_6 |
1551 * +-------------------------+
1552 * | Data BDE |
1553 * +-------------------------+
1554 * |more Data BDE's ... (opt)|
1555 * +-------------------------+
1556 *
1557 *
1558 * Note: Data s/g buffers have been dma mapped
1559 *
1560 * Returns the number of BDEs added to the BPL.
1561 **/
1562 static int
1563 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1564 struct ulp_bde64 *bpl, int datasegcnt)
1565 {
1566 struct scatterlist *sgde = NULL; /* s/g data entry */
1567 struct lpfc_pde5 *pde5 = NULL;
1568 struct lpfc_pde6 *pde6 = NULL;
1569 dma_addr_t physaddr;
1570 int i = 0, num_bde = 0, status;
1571 int datadir = sc->sc_data_direction;
1572 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1573 uint32_t rc;
1574 #endif
1575 uint32_t checking = 1;
1576 uint32_t reftag;
1577 uint8_t txop, rxop;
1578
1579 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1580 if (status)
1581 goto out;
1582
1583 /* extract some info from the scsi command for pde*/
1584 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1585
1586 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1587 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1588 if (rc) {
1589 if (rc & BG_ERR_SWAP)
1590 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1591 if (rc & BG_ERR_CHECK)
1592 checking = 0;
1593 }
1594 #endif
1595
1596 /* setup PDE5 with what we have */
1597 pde5 = (struct lpfc_pde5 *) bpl;
1598 memset(pde5, 0, sizeof(struct lpfc_pde5));
1599 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1600
1601 /* Endianness conversion if necessary for PDE5 */
1602 pde5->word0 = cpu_to_le32(pde5->word0);
1603 pde5->reftag = cpu_to_le32(reftag);
1604
1605 /* advance bpl and increment bde count */
1606 num_bde++;
1607 bpl++;
1608 pde6 = (struct lpfc_pde6 *) bpl;
1609
1610 /* setup PDE6 with the rest of the info */
1611 memset(pde6, 0, sizeof(struct lpfc_pde6));
1612 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1613 bf_set(pde6_optx, pde6, txop);
1614 bf_set(pde6_oprx, pde6, rxop);
1615
1616 /*
1617 * We only need to check the data on READs, for WRITEs
1618 * protection data is automatically generated, not checked.
1619 */
1620 if (datadir == DMA_FROM_DEVICE) {
1621 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1622 bf_set(pde6_ce, pde6, checking);
1623 else
1624 bf_set(pde6_ce, pde6, 0);
1625
1626 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1627 bf_set(pde6_re, pde6, checking);
1628 else
1629 bf_set(pde6_re, pde6, 0);
1630 }
1631 bf_set(pde6_ai, pde6, 1);
1632 bf_set(pde6_ae, pde6, 0);
1633 bf_set(pde6_apptagval, pde6, 0);
1634
1635 /* Endianness conversion if necessary for PDE6 */
1636 pde6->word0 = cpu_to_le32(pde6->word0);
1637 pde6->word1 = cpu_to_le32(pde6->word1);
1638 pde6->word2 = cpu_to_le32(pde6->word2);
1639
1640 /* advance bpl and increment bde count */
1641 num_bde++;
1642 bpl++;
1643
1644 /* assumption: caller has already run dma_map_sg on command data */
1645 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1646 physaddr = sg_dma_address(sgde);
1647 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1648 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1649 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1650 if (datadir == DMA_TO_DEVICE)
1651 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1652 else
1653 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1654 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1655 bpl++;
1656 num_bde++;
1657 }
1658
1659 out:
1660 return num_bde;
1661 }
1662
1663 /**
1664 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1665 * @phba: The Hba for which this call is being executed.
1666 * @sc: pointer to scsi command we're working on
1667 * @bpl: pointer to buffer list for protection groups
1668 * @datacnt: number of segments of data that have been dma mapped
1669 * @protcnt: number of segments of protection data that have been dma mapped
1670 *
1671 * This function sets up BPL buffer list for protection groups of
1672 * type LPFC_PG_TYPE_DIF
1673 *
1674 * This is usually used when DIFs are in their own buffers,
1675 * separate from the data. The HBA can then be instructed
1676 * to place the DIFs in the outgoing stream. For read operations,
1677 * the HBA could extract the DIFs and place them in DIF buffers.
1678 *
1679 * The buffer list for this type consists of one or more of the
1680 * protection groups described below:
1681 * +-------------------------+
1682 * start of first prot group --> | PDE_5 |
1683 * +-------------------------+
1684 * | PDE_6 |
1685 * +-------------------------+
1686 * | PDE_7 (Prot BDE) |
1687 * +-------------------------+
1688 * | Data BDE |
1689 * +-------------------------+
1690 * |more Data BDE's ... (opt)|
1691 * +-------------------------+
1692 * start of new prot group --> | PDE_5 |
1693 * +-------------------------+
1694 * | ... |
1695 * +-------------------------+
1696 *
1697 * Note: It is assumed that both data and protection s/g buffers have been
1698 * mapped for DMA
1699 *
1700 * Returns the number of BDEs added to the BPL.
1701 **/
1702 static int
1703 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1704 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1705 {
1706 struct scatterlist *sgde = NULL; /* s/g data entry */
1707 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1708 struct lpfc_pde5 *pde5 = NULL;
1709 struct lpfc_pde6 *pde6 = NULL;
1710 struct lpfc_pde7 *pde7 = NULL;
1711 dma_addr_t dataphysaddr, protphysaddr;
1712 unsigned short curr_data = 0, curr_prot = 0;
1713 unsigned int split_offset;
1714 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1715 unsigned int protgrp_blks, protgrp_bytes;
1716 unsigned int remainder, subtotal;
1717 int status;
1718 int datadir = sc->sc_data_direction;
1719 unsigned char pgdone = 0, alldone = 0;
1720 unsigned blksize;
1721 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1722 uint32_t rc;
1723 #endif
1724 uint32_t checking = 1;
1725 uint32_t reftag;
1726 uint8_t txop, rxop;
1727 int num_bde = 0;
1728
1729 sgpe = scsi_prot_sglist(sc);
1730 sgde = scsi_sglist(sc);
1731
1732 if (!sgpe || !sgde) {
1733 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1734 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1735 sgpe, sgde);
1736 return 0;
1737 }
1738
1739 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1740 if (status)
1741 goto out;
1742
1743 /* extract some info from the scsi command */
1744 blksize = lpfc_cmd_blksize(sc);
1745 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
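	/*
	 * Note: the reference tag seeds from the low 32 bits of the starting
	 * LBA and is advanced by protgrp_blks after each protection group
	 * below, so every DIF tuple is checked against the expected ref tag.
	 */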
1746
1747 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1748 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1749 if (rc) {
1750 if (rc & BG_ERR_SWAP)
1751 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1752 if (rc & BG_ERR_CHECK)
1753 checking = 0;
1754 }
1755 #endif
1756
1757 split_offset = 0;
1758 do {
1759 /* Check to see if we ran out of space */
1760 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1761 return num_bde + 3;
1762
1763 /* setup PDE5 with what we have */
1764 pde5 = (struct lpfc_pde5 *) bpl;
1765 memset(pde5, 0, sizeof(struct lpfc_pde5));
1766 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1767
1768 /* Endianness conversion if necessary for PDE5 */
1769 pde5->word0 = cpu_to_le32(pde5->word0);
1770 pde5->reftag = cpu_to_le32(reftag);
1771
1772 /* advance bpl and increment bde count */
1773 num_bde++;
1774 bpl++;
1775 pde6 = (struct lpfc_pde6 *) bpl;
1776
1777 /* setup PDE6 with the rest of the info */
1778 memset(pde6, 0, sizeof(struct lpfc_pde6));
1779 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1780 bf_set(pde6_optx, pde6, txop);
1781 bf_set(pde6_oprx, pde6, rxop);
1782
1783 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1784 bf_set(pde6_ce, pde6, checking);
1785 else
1786 bf_set(pde6_ce, pde6, 0);
1787
1788 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1789 bf_set(pde6_re, pde6, checking);
1790 else
1791 bf_set(pde6_re, pde6, 0);
1792
1793 bf_set(pde6_ai, pde6, 1);
1794 bf_set(pde6_ae, pde6, 0);
1795 bf_set(pde6_apptagval, pde6, 0);
1796
1797 /* Endianness conversion if necessary for PDE6 */
1798 pde6->word0 = cpu_to_le32(pde6->word0);
1799 pde6->word1 = cpu_to_le32(pde6->word1);
1800 pde6->word2 = cpu_to_le32(pde6->word2);
1801
1802 /* advance bpl and increment bde count */
1803 num_bde++;
1804 bpl++;
1805
1806 /* setup the first BDE that points to protection buffer */
1807 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1808 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1809
1810 /* must be integer multiple of the DIF block length */
1811 BUG_ON(protgroup_len % 8);
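		/*
		 * Each T10 DIF tuple (guard/app/ref) is 8 bytes, so the
		 * protection buffer must hold a whole number of tuples;
		 * protgrp_blks below is the number of data blocks this
		 * protection buffer covers.
		 */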
1812
1813 pde7 = (struct lpfc_pde7 *) bpl;
1814 memset(pde7, 0, sizeof(struct lpfc_pde7));
1815 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1816
1817 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1818 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1819
1820 protgrp_blks = protgroup_len / 8;
1821 protgrp_bytes = protgrp_blks * blksize;
1822
1823 /* check if this pde is crossing the 4K boundary; if so split */
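		/*
		 * Illustrative example: if addrLow ends in 0xF80 and
		 * protgroup_len is 0x100, the group would cross a 4K boundary,
		 * so only the 0x80 bytes (16 tuples) up to the boundary are
		 * described here and protgroup_offset carries the remainder
		 * into the next pass of the outer loop.
		 */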
1824 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1825 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1826 protgroup_offset += protgroup_remainder;
1827 protgrp_blks = protgroup_remainder / 8;
1828 protgrp_bytes = protgrp_blks * blksize;
1829 } else {
1830 protgroup_offset = 0;
1831 curr_prot++;
1832 }
1833
1834 num_bde++;
1835
1836 /* setup BDE's for data blocks associated with DIF data */
1837 pgdone = 0;
1838 subtotal = 0; /* total bytes processed for current prot grp */
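		/*
		 * Walk the data s/g list until protgrp_bytes of data have been
		 * described; a data segment that straddles a protection group
		 * boundary is split and split_offset resumes it next pass.
		 */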
1839 while (!pgdone) {
1840 /* Check to see if we ran out of space */
1841 if (num_bde >= phba->cfg_total_seg_cnt)
1842 return num_bde + 1;
1843
1844 if (!sgde) {
1845 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1846 "9065 BLKGRD:%s Invalid data segment\n",
1847 __func__);
1848 return 0;
1849 }
1850 bpl++;
1851 dataphysaddr = sg_dma_address(sgde) + split_offset;
1852 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1853 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1854
1855 remainder = sg_dma_len(sgde) - split_offset;
1856
1857 if ((subtotal + remainder) <= protgrp_bytes) {
1858 /* we can use this whole buffer */
1859 bpl->tus.f.bdeSize = remainder;
1860 split_offset = 0;
1861
1862 if ((subtotal + remainder) == protgrp_bytes)
1863 pgdone = 1;
1864 } else {
1865 /* must split this buffer with next prot grp */
1866 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1867 split_offset += bpl->tus.f.bdeSize;
1868 }
1869
1870 subtotal += bpl->tus.f.bdeSize;
1871
1872 if (datadir == DMA_TO_DEVICE)
1873 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1874 else
1875 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1876 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1877
1878 num_bde++;
1879 curr_data++;
1880
1881 if (split_offset)
1882 break;
1883
1884 /* Move to the next s/g segment if possible */
1885 sgde = sg_next(sgde);
1886
1887 }
1888
1889 if (protgroup_offset) {
1890 /* update the reference tag */
1891 reftag += protgrp_blks;
1892 bpl++;
1893 continue;
1894 }
1895
1896 /* are we done ? */
1897 if (curr_prot == protcnt) {
1898 alldone = 1;
1899 } else if (curr_prot < protcnt) {
1900 /* advance to next prot buffer */
1901 sgpe = sg_next(sgpe);
1902 bpl++;
1903
1904 /* update the reference tag */
1905 reftag += protgrp_blks;
1906 } else {
1907 /* if we're here, we have a bug */
1908 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1909 "9054 BLKGRD: bug in %s\n", __func__);
1910 }
1911
1912 } while (!alldone);
1913 out:
1914
1915 return num_bde;
1916 }
1917
1918 /**
1919 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1920 * @phba: The Hba for which this call is being executed.
1921 * @sc: pointer to scsi command we're working on
1922 * @sgl: pointer to buffer list for protection groups
1923 * @datasegcnt: number of segments of data that have been dma mapped
1924 *
1925 * This function sets up SGL buffer list for protection groups of
1926 * type LPFC_PG_TYPE_NO_DIF
1927 *
1928 * This is usually used when the HBA is instructed to generate
1929 * DIFs and insert them into the data stream (or strip DIFs from
1930 * the incoming data stream)
1931 *
1932 * The buffer list consists of just one protection group described
1933 * below:
1934 * +-------------------------+
1935 * start of prot group --> | DI_SEED |
1936 * +-------------------------+
1937 * | Data SGE |
1938 * +-------------------------+
1939 * |more Data SGE's ... (opt)|
1940 * +-------------------------+
1941 *
1942 *
1943 * Note: Data s/g buffers have been dma mapped
1944 *
1945 * Returns the number of SGEs added to the SGL.
1946 **/
1947 static uint32_t
1948 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1949 struct sli4_sge *sgl, int datasegcnt,
1950 struct lpfc_io_buf *lpfc_cmd)
1951 {
1952 struct scatterlist *sgde = NULL; /* s/g data entry */
1953 struct sli4_sge_diseed *diseed = NULL;
1954 dma_addr_t physaddr;
1955 int i = 0, status;
1956 uint32_t reftag, num_sge = 0;
1957 uint8_t txop, rxop;
1958 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1959 uint32_t rc;
1960 #endif
1961 uint32_t checking = 1;
1962 uint32_t dma_len;
1963 uint32_t dma_offset = 0;
1964 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1965 int j;
1966 bool lsp_just_set = false;
1967
1968 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1969 if (status)
1970 goto out;
1971
1972 /* extract some info from the scsi command for pde*/
1973 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1974
1975 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1976 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1977 if (rc) {
1978 if (rc & BG_ERR_SWAP)
1979 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1980 if (rc & BG_ERR_CHECK)
1981 checking = 0;
1982 }
1983 #endif
1984
1985 /* setup DISEED with what we have */
1986 diseed = (struct sli4_sge_diseed *) sgl;
1987 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1988 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1989
1990 /* Endianness conversion if necessary */
1991 diseed->ref_tag = cpu_to_le32(reftag);
1992 diseed->ref_tag_tran = diseed->ref_tag;
1993
1994 /*
1995 * We only need to check the data on READs, for WRITEs
1996 * protection data is automatically generated, not checked.
1997 */
1998 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1999 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2000 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2001 else
2002 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2003
2004 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2005 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2006 else
2007 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2008 }
2009
2010 /* setup DISEED with the rest of the info */
2011 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2012 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2013
2014 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2015 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2016
2017 /* Endianness conversion if necessary for DISEED */
2018 diseed->word2 = cpu_to_le32(diseed->word2);
2019 diseed->word3 = cpu_to_le32(diseed->word3);
2020
2021 /* advance bpl and increment sge count */
2022 num_sge++;
2023 sgl++;
2024
2025 /* assumption: caller has already run dma_map_sg on command data */
2026 sgde = scsi_sglist(sc);
2027 j = 3;
2028 for (i = 0; i < datasegcnt; i++) {
2029 /* clear it */
2030 sgl->word2 = 0;
2031
2032 /* do we need to expand the segment */
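		/*
		 * When the next slot would hit the end of the hardware SGL
		 * page (every border_sge_num entries) and more data SGEs
		 * remain, emit an LSP (link) SGE pointing at an extra SGL
		 * page from the per-hardware-queue pool and continue there.
		 */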
2033 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2034 ((datasegcnt - 1) != i)) {
2035 /* set LSP type */
2036 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2037
2038 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2039
2040 if (unlikely(!sgl_xtra)) {
2041 lpfc_cmd->seg_cnt = 0;
2042 return 0;
2043 }
2044 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2045 sgl_xtra->dma_phys_sgl));
2046 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2047 sgl_xtra->dma_phys_sgl));
2048
2049 } else {
2050 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2051 }
2052
2053 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2054 if ((datasegcnt - 1) == i)
2055 bf_set(lpfc_sli4_sge_last, sgl, 1);
2056 physaddr = sg_dma_address(sgde);
2057 dma_len = sg_dma_len(sgde);
2058 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2059 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2060
2061 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2062 sgl->word2 = cpu_to_le32(sgl->word2);
2063 sgl->sge_len = cpu_to_le32(dma_len);
2064
2065 dma_offset += dma_len;
2066 sgde = sg_next(sgde);
2067
2068 sgl++;
2069 num_sge++;
2070 lsp_just_set = false;
2071
2072 } else {
2073 sgl->word2 = cpu_to_le32(sgl->word2);
2074 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2075
2076 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2077 i = i - 1;
2078
2079 lsp_just_set = true;
2080 }
2081
2082 j++;
2083
2084 }
2085
2086 out:
2087 return num_sge;
2088 }
2089
2090 /**
2091 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2092 * @phba: The Hba for which this call is being executed.
2093 * @sc: pointer to scsi command we're working on
2094 * @sgl: pointer to buffer list for protection groups
2095 * @datacnt: number of segments of data that have been dma mapped
2096 * @protcnt: number of segments of protection data that have been dma mapped
2097 *
2098 * This function sets up SGL buffer list for protection groups of
2099 * type LPFC_PG_TYPE_DIF
2100 *
2101 * This is usually used when DIFs are in their own buffers,
2102 * separate from the data. The HBA can then be instructed
2103 * to place the DIFs in the outgoing stream. For read operations,
2104 * the HBA can extract the DIFs and place them in DIF buffers.
2105 *
2106 * The buffer list for this type consists of one or more of the
2107 * protection groups described below:
2108 * +-------------------------+
2109 * start of first prot group --> | DISEED |
2110 * +-------------------------+
2111 * | DIF (Prot SGE) |
2112 * +-------------------------+
2113 * | Data SGE |
2114 * +-------------------------+
2115 * |more Data SGE's ... (opt)|
2116 * +-------------------------+
2117 * start of new prot group --> | DISEED |
2118 * +-------------------------+
2119 * | ... |
2120 * +-------------------------+
2121 *
2122 * Note: It is assumed that both data and protection s/g buffers have been
2123 * mapped for DMA
2124 *
2125 * Returns the number of SGEs added to the SGL.
2126 **/
2127 static uint32_t
2128 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2129 struct sli4_sge *sgl, int datacnt, int protcnt,
2130 struct lpfc_io_buf *lpfc_cmd)
2131 {
2132 struct scatterlist *sgde = NULL; /* s/g data entry */
2133 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2134 struct sli4_sge_diseed *diseed = NULL;
2135 dma_addr_t dataphysaddr, protphysaddr;
2136 unsigned short curr_data = 0, curr_prot = 0;
2137 unsigned int split_offset;
2138 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2139 unsigned int protgrp_blks, protgrp_bytes;
2140 unsigned int remainder, subtotal;
2141 int status;
2142 unsigned char pgdone = 0, alldone = 0;
2143 unsigned blksize;
2144 uint32_t reftag;
2145 uint8_t txop, rxop;
2146 uint32_t dma_len;
2147 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2148 uint32_t rc;
2149 #endif
2150 uint32_t checking = 1;
2151 uint32_t dma_offset = 0, num_sge = 0;
2152 int j = 2;
2153 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2154
2155 sgpe = scsi_prot_sglist(sc);
2156 sgde = scsi_sglist(sc);
2157
2158 if (!sgpe || !sgde) {
2159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2160 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2161 sgpe, sgde);
2162 return 0;
2163 }
2164
2165 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2166 if (status)
2167 goto out;
2168
2169 /* extract some info from the scsi command */
2170 blksize = lpfc_cmd_blksize(sc);
2171 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2172
2173 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2174 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2175 if (rc) {
2176 if (rc & BG_ERR_SWAP)
2177 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2178 if (rc & BG_ERR_CHECK)
2179 checking = 0;
2180 }
2181 #endif
2182
2183 split_offset = 0;
2184 do {
2185 /* Check to see if we ran out of space */
2186 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2187 !(phba->cfg_xpsgl))
2188 return num_sge + 3;
2189
2190 /* DISEED and DIF have to be together */
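		/*
		 * The DISEED, its DIF SGE and the first data SGE must not be
		 * separated by a page chain, so if any of the next three
		 * slots would land on the page border, chain to a fresh SGL
		 * page before building this protection group.
		 */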
2191 if (!((j + 1) % phba->border_sge_num) ||
2192 !((j + 2) % phba->border_sge_num) ||
2193 !((j + 3) % phba->border_sge_num)) {
2194 sgl->word2 = 0;
2195
2196 /* set LSP type */
2197 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2198
2199 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2200
2201 if (unlikely(!sgl_xtra)) {
2202 goto out;
2203 } else {
2204 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2205 sgl_xtra->dma_phys_sgl));
2206 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2207 sgl_xtra->dma_phys_sgl));
2208 }
2209
2210 sgl->word2 = cpu_to_le32(sgl->word2);
2211 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2212
2213 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2214 j = 0;
2215 }
2216
2217 /* setup DISEED with what we have */
2218 diseed = (struct sli4_sge_diseed *) sgl;
2219 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2220 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2221
2222 /* Endianness conversion if necessary */
2223 diseed->ref_tag = cpu_to_le32(reftag);
2224 diseed->ref_tag_tran = diseed->ref_tag;
2225
2226 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2227 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2228
2229 } else {
2230 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2231 /*
2232 * When in this mode, the hardware will replace
2233 * the guard tag from the host with a
2234 * newly generated good CRC for the wire.
2235 * Switch to raw mode here to avoid this
2236 * behavior. What the host sends gets put on the wire.
2237 */
2238 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2239 txop = BG_OP_RAW_MODE;
2240 rxop = BG_OP_RAW_MODE;
2241 }
2242 }
2243
2244
2245 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2246 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2247 else
2248 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2249
2250 /* setup DISEED with the rest of the info */
2251 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2252 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2253
2254 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2255 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2256
2257 /* Endianness conversion if necessary for DISEED */
2258 diseed->word2 = cpu_to_le32(diseed->word2);
2259 diseed->word3 = cpu_to_le32(diseed->word3);
2260
2261 /* advance sgl and increment bde count */
2262 num_sge++;
2263
2264 sgl++;
2265 j++;
2266
2267 /* setup the first BDE that points to protection buffer */
2268 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2269 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2270
2271 /* must be integer multiple of the DIF block length */
2272 BUG_ON(protgroup_len % 8);
2273
2274 /* Now setup DIF SGE */
2275 sgl->word2 = 0;
2276 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2277 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2278 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2279 sgl->word2 = cpu_to_le32(sgl->word2);
2280 sgl->sge_len = 0;
2281
2282 protgrp_blks = protgroup_len / 8;
2283 protgrp_bytes = protgrp_blks * blksize;
2284
2285 /* check if DIF SGE is crossing the 4K boundary; if so split */
2286 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2287 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2288 protgroup_offset += protgroup_remainder;
2289 protgrp_blks = protgroup_remainder / 8;
2290 protgrp_bytes = protgrp_blks * blksize;
2291 } else {
2292 protgroup_offset = 0;
2293 curr_prot++;
2294 }
2295
2296 num_sge++;
2297
2298 /* setup SGE's for data blocks associated with DIF data */
2299 pgdone = 0;
2300 subtotal = 0; /* total bytes processed for current prot grp */
2301
2302 sgl++;
2303 j++;
2304
2305 while (!pgdone) {
2306 /* Check to see if we ran out of space */
2307 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2308 !phba->cfg_xpsgl)
2309 return num_sge + 1;
2310
2311 if (!sgde) {
2312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2313 "9086 BLKGRD:%s Invalid data segment\n",
2314 __func__);
2315 return 0;
2316 }
2317
2318 if (!((j + 1) % phba->border_sge_num)) {
2319 sgl->word2 = 0;
2320
2321 /* set LSP type */
2322 bf_set(lpfc_sli4_sge_type, sgl,
2323 LPFC_SGE_TYPE_LSP);
2324
2325 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2326 lpfc_cmd);
2327
2328 if (unlikely(!sgl_xtra)) {
2329 goto out;
2330 } else {
2331 sgl->addr_lo = cpu_to_le32(
2332 putPaddrLow(sgl_xtra->dma_phys_sgl));
2333 sgl->addr_hi = cpu_to_le32(
2334 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2335 }
2336
2337 sgl->word2 = cpu_to_le32(sgl->word2);
2338 sgl->sge_len = cpu_to_le32(
2339 phba->cfg_sg_dma_buf_size);
2340
2341 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2342 } else {
2343 dataphysaddr = sg_dma_address(sgde) +
2344 split_offset;
2345
2346 remainder = sg_dma_len(sgde) - split_offset;
2347
2348 if ((subtotal + remainder) <= protgrp_bytes) {
2349 /* we can use this whole buffer */
2350 dma_len = remainder;
2351 split_offset = 0;
2352
2353 if ((subtotal + remainder) ==
2354 protgrp_bytes)
2355 pgdone = 1;
2356 } else {
2357 /* must split this buffer with next
2358 * prot grp
2359 */
2360 dma_len = protgrp_bytes - subtotal;
2361 split_offset += dma_len;
2362 }
2363
2364 subtotal += dma_len;
2365
2366 sgl->word2 = 0;
2367 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2368 dataphysaddr));
2369 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2370 dataphysaddr));
2371 bf_set(lpfc_sli4_sge_last, sgl, 0);
2372 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2373 bf_set(lpfc_sli4_sge_type, sgl,
2374 LPFC_SGE_TYPE_DATA);
2375
2376 sgl->sge_len = cpu_to_le32(dma_len);
2377 dma_offset += dma_len;
2378
2379 num_sge++;
2380 curr_data++;
2381
2382 if (split_offset) {
2383 sgl++;
2384 j++;
2385 break;
2386 }
2387
2388 /* Move to the next s/g segment if possible */
2389 sgde = sg_next(sgde);
2390
2391 sgl++;
2392 }
2393
2394 j++;
2395 }
2396
2397 if (protgroup_offset) {
2398 /* update the reference tag */
2399 reftag += protgrp_blks;
2400 continue;
2401 }
2402
2403 /* are we done ? */
2404 if (curr_prot == protcnt) {
2405 /* mark the last SGL */
2406 sgl--;
2407 bf_set(lpfc_sli4_sge_last, sgl, 1);
2408 alldone = 1;
2409 } else if (curr_prot < protcnt) {
2410 /* advance to next prot buffer */
2411 sgpe = sg_next(sgpe);
2412
2413 /* update the reference tag */
2414 reftag += protgrp_blks;
2415 } else {
2416 /* if we're here, we have a bug */
2417 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2418 "9085 BLKGRD: bug in %s\n", __func__);
2419 }
2420
2421 } while (!alldone);
2422
2423 out:
2424
2425 return num_sge;
2426 }
2427
2428 /**
2429 * lpfc_prot_group_type - Get protection group type of SCSI command
2430 * @phba: The Hba for which this call is being executed.
2431 * @sc: pointer to scsi command we're working on
2432 *
2433 * Given a SCSI command that supports DIF, determine composition of protection
2434 * groups involved in setting up buffer lists
2435 *
2436 * Returns: Protection group type (with or without DIF)
2437 *
2438 **/
2439 static int
2440 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2441 {
2442 int ret = LPFC_PG_TYPE_INVALID;
2443 unsigned char op = scsi_get_prot_op(sc);
2444
2445 switch (op) {
2446 case SCSI_PROT_READ_STRIP:
2447 case SCSI_PROT_WRITE_INSERT:
2448 ret = LPFC_PG_TYPE_NO_DIF;
2449 break;
2450 case SCSI_PROT_READ_INSERT:
2451 case SCSI_PROT_WRITE_STRIP:
2452 case SCSI_PROT_READ_PASS:
2453 case SCSI_PROT_WRITE_PASS:
2454 ret = LPFC_PG_TYPE_DIF_BUF;
2455 break;
2456 default:
2457 if (phba)
2458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2459 "9021 Unsupported protection op:%d\n",
2460 op);
2461 break;
2462 }
2463 return ret;
2464 }
2465
2466 /**
2467 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2468 * @phba: The Hba for which this call is being executed.
2469 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2470 *
2471 * Adjust the data length to account for how much data
2472 * is actually on the wire.
2473 *
2474 * returns the adjusted data length
2475 **/
2476 static int
2477 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2478 struct lpfc_io_buf *lpfc_cmd)
2479 {
2480 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2481 int fcpdl;
2482
2483 fcpdl = scsi_bufflen(sc);
2484
2485 /* Check if there is protection data on the wire */
2486 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2487 /* Read check for protection data */
2488 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2489 return fcpdl;
2490
2491 } else {
2492 /* Write check for protection data */
2493 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2494 return fcpdl;
2495 }
2496
2497 /*
2498 * If we are in DIF Type 1 mode every data block has an 8 byte
2499 * DIF (trailer) attached to it. Must adjust the FCP data length
2500 * to account for the protection data.
2501 */
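	/*
	 * Example: a 4096-byte request on a 512-byte-block device spans
	 * 8 blocks, so 8 * 8 = 64 bytes of DIF are added and fcpdl
	 * becomes 4160.
	 */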
2502 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2503
2504 return fcpdl;
2505 }
2506
2507 /**
2508 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2509 * @phba: The Hba for which this call is being executed.
2510 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2511 *
2512 * This is the protection/DIF aware version of
2513 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2514 * two functions eventually, but for now, it's here.
2515 * RETURNS 0 - SUCCESS,
2516 * 1 - Failed DMA map, retry.
2517 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2518 **/
2519 static int
2520 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2521 struct lpfc_io_buf *lpfc_cmd)
2522 {
2523 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2524 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2525 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2526 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2527 uint32_t num_bde = 0;
2528 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2529 int prot_group_type = 0;
2530 int fcpdl;
2531 int ret = 1;
2532 struct lpfc_vport *vport = phba->pport;
2533
2534 /*
2535 	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2536 	 * and fcp_rsp regions to the first data bde entry
2537 */
2538 bpl += 2;
2539 if (scsi_sg_count(scsi_cmnd)) {
2540 /*
2541 * The driver stores the segment count returned from pci_map_sg
2542 * because this is a count of dma-mappings used to map the use_sg
2543 * pages. They are not guaranteed to be the same for those
2544 * architectures that implement an IOMMU.
2545 */
2546 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2547 scsi_sglist(scsi_cmnd),
2548 scsi_sg_count(scsi_cmnd), datadir);
2549 if (unlikely(!datasegcnt))
2550 return 1;
2551
2552 lpfc_cmd->seg_cnt = datasegcnt;
2553
2554 /* First check if data segment count from SCSI Layer is good */
2555 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2556 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2557 ret = 2;
2558 goto err;
2559 }
2560
2561 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2562
2563 switch (prot_group_type) {
2564 case LPFC_PG_TYPE_NO_DIF:
2565
2566 /* Here we need to add a PDE5 and PDE6 to the count */
2567 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2568 ret = 2;
2569 goto err;
2570 }
2571
2572 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2573 datasegcnt);
2574 /* we should have 2 or more entries in buffer list */
2575 if (num_bde < 2) {
2576 ret = 2;
2577 goto err;
2578 }
2579 break;
2580
2581 case LPFC_PG_TYPE_DIF_BUF:
2582 /*
2583 * This type indicates that protection buffers are
2584 * passed to the driver, so that needs to be prepared
2585 * for DMA
2586 */
2587 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2588 scsi_prot_sglist(scsi_cmnd),
2589 scsi_prot_sg_count(scsi_cmnd), datadir);
2590 if (unlikely(!protsegcnt)) {
2591 scsi_dma_unmap(scsi_cmnd);
2592 return 1;
2593 }
2594
2595 lpfc_cmd->prot_seg_cnt = protsegcnt;
2596
2597 /*
2598 * There is a minimum of 4 BPLs used for every
2599 * protection data segment.
2600 */
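			/*
			 * (PDE5 + PDE6 + PDE7 plus at least one data BDE per
			 * protection group is where the factor of 4 comes
			 * from.)
			 */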
2601 if ((lpfc_cmd->prot_seg_cnt * 4) >
2602 (phba->cfg_total_seg_cnt - 2)) {
2603 ret = 2;
2604 goto err;
2605 }
2606
2607 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2608 datasegcnt, protsegcnt);
2609 /* we should have 3 or more entries in buffer list */
2610 if ((num_bde < 3) ||
2611 (num_bde > phba->cfg_total_seg_cnt)) {
2612 ret = 2;
2613 goto err;
2614 }
2615 break;
2616
2617 case LPFC_PG_TYPE_INVALID:
2618 default:
2619 scsi_dma_unmap(scsi_cmnd);
2620 lpfc_cmd->seg_cnt = 0;
2621
2622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2623 "9022 Unexpected protection group %i\n",
2624 prot_group_type);
2625 return 2;
2626 }
2627 }
2628
2629 /*
2630 * Finish initializing those IOCB fields that are dependent on the
2631 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2632 * reinitialized since all iocb memory resources are used many times
2633 * for transmit, receive, and continuation bpl's.
2634 */
2635 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2636 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2637 iocb_cmd->ulpBdeCount = 1;
2638 iocb_cmd->ulpLe = 1;
2639
2640 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2641 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2642
2643 /*
2644 * Due to difference in data length between DIF/non-DIF paths,
2645 * we need to set word 4 of IOCB here
2646 */
2647 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2648
2649 /*
2650 * For First burst, we may need to adjust the initial transfer
2651 * length for DIF
2652 */
2653 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2654 (fcpdl < vport->cfg_first_burst_size))
2655 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2656
2657 return 0;
2658 err:
2659 if (lpfc_cmd->seg_cnt)
2660 scsi_dma_unmap(scsi_cmnd);
2661 if (lpfc_cmd->prot_seg_cnt)
2662 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2663 scsi_prot_sg_count(scsi_cmnd),
2664 scsi_cmnd->sc_data_direction);
2665
2666 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2667 "9023 Cannot setup S/G List for HBA"
2668 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2669 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2670 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2671 prot_group_type, num_bde);
2672
2673 lpfc_cmd->seg_cnt = 0;
2674 lpfc_cmd->prot_seg_cnt = 0;
2675 return ret;
2676 }
2677
2678 /*
2679 * This function calculates the T10 DIF guard tag
2680 * on the specified data using the CRC algorithm
2681 * provided by crc_t10dif.
2682 */
2683 static uint16_t
2684 lpfc_bg_crc(uint8_t *data, int count)
2685 {
2686 uint16_t crc = 0;
2687 uint16_t x;
2688
2689 crc = crc_t10dif(data, count);
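	/*
	 * crc_t10dif() returns the CRC in CPU byte order; the guard tag in
	 * the DIF tuple is big-endian on the wire, so swap before comparing.
	 */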
2690 x = cpu_to_be16(crc);
2691 return x;
2692 }
2693
2694 /*
2695 * This function calculates the T10 DIF guard tag
2696 * on the specified data using the checksum algorithm
2697 * provided by ip_compute_csum.
2698 */
2699 static uint16_t
2700 lpfc_bg_csum(uint8_t *data, int count)
2701 {
2702 uint16_t ret;
2703
2704 ret = ip_compute_csum(data, count);
2705 return ret;
2706 }
2707
2708 /*
2709 * This function examines the protection data to try to determine
2710 * what type of T10-DIF error occurred.
2711 */
2712 static void
2713 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2714 {
2715 struct scatterlist *sgpe; /* s/g prot entry */
2716 struct scatterlist *sgde; /* s/g data entry */
2717 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2718 struct scsi_dif_tuple *src = NULL;
2719 uint8_t *data_src = NULL;
2720 uint16_t guard_tag;
2721 uint16_t start_app_tag, app_tag;
2722 uint32_t start_ref_tag, ref_tag;
2723 int prot, protsegcnt;
2724 int err_type, len, data_len;
2725 int chk_ref, chk_app, chk_guard;
2726 uint16_t sum;
2727 unsigned blksize;
2728
2729 err_type = BGS_GUARD_ERR_MASK;
2730 sum = 0;
2731 guard_tag = 0;
2732
2733 /* First check to see if there is protection data to examine */
2734 prot = scsi_get_prot_op(cmd);
2735 if ((prot == SCSI_PROT_READ_STRIP) ||
2736 (prot == SCSI_PROT_WRITE_INSERT) ||
2737 (prot == SCSI_PROT_NORMAL))
2738 goto out;
2739
2740 /* Currently the driver just supports ref_tag and guard_tag checking */
2741 chk_ref = 1;
2742 chk_app = 0;
2743 chk_guard = 0;
2744
2745 /* Setup a ptr to the protection data provided by the SCSI host */
2746 sgpe = scsi_prot_sglist(cmd);
2747 protsegcnt = lpfc_cmd->prot_seg_cnt;
2748
2749 if (sgpe && protsegcnt) {
2750
2751 /*
2752 * We will only try to verify guard tag if the segment
2753 * data length is a multiple of the blksize.
2754 */
2755 sgde = scsi_sglist(cmd);
2756 blksize = lpfc_cmd_blksize(cmd);
2757 data_src = (uint8_t *)sg_virt(sgde);
2758 data_len = sgde->length;
2759 if ((data_len & (blksize - 1)) == 0)
2760 chk_guard = 1;
2761
2762 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2763 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2764 start_app_tag = src->app_tag;
2765 len = sgpe->length;
2766 while (src && protsegcnt) {
2767 while (len) {
2768
2769 /*
2770 * First check to see if a protection data
2771 * check is valid
2772 */
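				/*
				 * A ref tag of 0xffffffff or an app tag of
				 * 0xffff is the T10 escape value meaning the
				 * block is not to be checked; just advance
				 * the expected ref tag and move on.
				 */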
2773 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2774 (src->app_tag == T10_PI_APP_ESCAPE)) {
2775 start_ref_tag++;
2776 goto skipit;
2777 }
2778
2779 /* First Guard Tag checking */
2780 if (chk_guard) {
2781 guard_tag = src->guard_tag;
2782 if (lpfc_cmd_guard_csum(cmd))
2783 sum = lpfc_bg_csum(data_src,
2784 blksize);
2785 else
2786 sum = lpfc_bg_crc(data_src,
2787 blksize);
2788 if ((guard_tag != sum)) {
2789 err_type = BGS_GUARD_ERR_MASK;
2790 goto out;
2791 }
2792 }
2793
2794 /* Reference Tag checking */
2795 ref_tag = be32_to_cpu(src->ref_tag);
2796 if (chk_ref && (ref_tag != start_ref_tag)) {
2797 err_type = BGS_REFTAG_ERR_MASK;
2798 goto out;
2799 }
2800 start_ref_tag++;
2801
2802 /* App Tag checking */
2803 app_tag = src->app_tag;
2804 if (chk_app && (app_tag != start_app_tag)) {
2805 err_type = BGS_APPTAG_ERR_MASK;
2806 goto out;
2807 }
2808 skipit:
2809 len -= sizeof(struct scsi_dif_tuple);
2810 if (len < 0)
2811 len = 0;
2812 src++;
2813
2814 data_src += blksize;
2815 data_len -= blksize;
2816
2817 /*
2818 * Are we at the end of the Data segment?
2819 * The data segment is only used for Guard
2820 * tag checking.
2821 */
2822 if (chk_guard && (data_len == 0)) {
2823 chk_guard = 0;
2824 sgde = sg_next(sgde);
2825 if (!sgde)
2826 goto out;
2827
2828 data_src = (uint8_t *)sg_virt(sgde);
2829 data_len = sgde->length;
2830 if ((data_len & (blksize - 1)) == 0)
2831 chk_guard = 1;
2832 }
2833 }
2834
2835 /* Goto the next Protection data segment */
2836 sgpe = sg_next(sgpe);
2837 if (sgpe) {
2838 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2839 len = sgpe->length;
2840 } else {
2841 src = NULL;
2842 }
2843 protsegcnt--;
2844 }
2845 }
2846 out:
2847 if (err_type == BGS_GUARD_ERR_MASK) {
2848 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2849 0x10, 0x1);
2850 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2851 SAM_STAT_CHECK_CONDITION;
2852 phba->bg_guard_err_cnt++;
2853 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2854 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2855 (unsigned long)scsi_get_lba(cmd),
2856 sum, guard_tag);
2857
2858 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2859 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2860 0x10, 0x3);
2861 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2862 SAM_STAT_CHECK_CONDITION;
2863
2864 phba->bg_reftag_err_cnt++;
2865 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2866 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2867 (unsigned long)scsi_get_lba(cmd),
2868 ref_tag, start_ref_tag);
2869
2870 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2871 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2872 0x10, 0x2);
2873 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2874 SAM_STAT_CHECK_CONDITION;
2875
2876 phba->bg_apptag_err_cnt++;
2877 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2878 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2879 (unsigned long)scsi_get_lba(cmd),
2880 app_tag, start_app_tag);
2881 }
2882 }
2883
2884
2885 /*
2886 * This function checks for BlockGuard errors detected by
2887 * the HBA. In case of errors, the ASC/ASCQ fields in the
2888 * sense buffer will be set accordingly, paired with
2889 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2890 * detected corruption.
2891 *
2892 * Returns:
2893 * 0 - No error found
2894 * 1 - BlockGuard error found
2895 * -1 - Internal error (bad profile, ...etc)
2896 */
2897 static int
2898 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2899 struct lpfc_iocbq *pIocbOut)
2900 {
2901 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2902 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2903 int ret = 0;
2904 uint32_t bghm = bgf->bghm;
2905 uint32_t bgstat = bgf->bgstat;
2906 uint64_t failing_sector = 0;
2907
2908 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2909 cmd->result = DID_ERROR << 16;
2910 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2911 "9072 BLKGRD: Invalid BG Profile in cmd"
2912 " 0x%x lba 0x%llx blk cnt 0x%x "
2913 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2914 (unsigned long long)scsi_get_lba(cmd),
2915 blk_rq_sectors(cmd->request), bgstat, bghm);
2916 ret = (-1);
2917 goto out;
2918 }
2919
2920 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2921 cmd->result = DID_ERROR << 16;
2922 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2923 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2924 " 0x%x lba 0x%llx blk cnt 0x%x "
2925 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2926 (unsigned long long)scsi_get_lba(cmd),
2927 blk_rq_sectors(cmd->request), bgstat, bghm);
2928 ret = (-1);
2929 goto out;
2930 }
2931
2932 if (lpfc_bgs_get_guard_err(bgstat)) {
2933 ret = 1;
2934
2935 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2936 0x10, 0x1);
2937 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2938 SAM_STAT_CHECK_CONDITION;
2939 phba->bg_guard_err_cnt++;
2940 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2941 "9055 BLKGRD: Guard Tag error in cmd"
2942 " 0x%x lba 0x%llx blk cnt 0x%x "
2943 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2944 (unsigned long long)scsi_get_lba(cmd),
2945 blk_rq_sectors(cmd->request), bgstat, bghm);
2946 }
2947
2948 if (lpfc_bgs_get_reftag_err(bgstat)) {
2949 ret = 1;
2950
2951 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2952 0x10, 0x3);
2953 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2954 SAM_STAT_CHECK_CONDITION;
2955
2956 phba->bg_reftag_err_cnt++;
2957 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2958 "9056 BLKGRD: Ref Tag error in cmd"
2959 " 0x%x lba 0x%llx blk cnt 0x%x "
2960 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2961 (unsigned long long)scsi_get_lba(cmd),
2962 blk_rq_sectors(cmd->request), bgstat, bghm);
2963 }
2964
2965 if (lpfc_bgs_get_apptag_err(bgstat)) {
2966 ret = 1;
2967
2968 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2969 0x10, 0x2);
2970 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2971 SAM_STAT_CHECK_CONDITION;
2972
2973 phba->bg_apptag_err_cnt++;
2974 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2975 "9061 BLKGRD: App Tag error in cmd"
2976 " 0x%x lba 0x%llx blk cnt 0x%x "
2977 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2978 (unsigned long long)scsi_get_lba(cmd),
2979 blk_rq_sectors(cmd->request), bgstat, bghm);
2980 }
2981
2982 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2983 /*
2984 * setup sense data descriptor 0 per SPC-4 as an information
2985 * field, and put the failing LBA in it.
2986 * This code assumes there was also a guard/app/ref tag error
2987 * indication.
2988 */
2989 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2990 cmd->sense_buffer[8] = 0; /* Information descriptor type */
2991 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
2992 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2993
2994 		/* bghm is an "on the wire" FC frame based count */
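		/*
		 * For INSERT/STRIP ops only data moves on the wire, so the
		 * count converts to blocks using the sector size alone; for
		 * the other ops each sector also carries an 8-byte DIF tuple.
		 */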
2995 switch (scsi_get_prot_op(cmd)) {
2996 case SCSI_PROT_READ_INSERT:
2997 case SCSI_PROT_WRITE_STRIP:
2998 bghm /= cmd->device->sector_size;
2999 break;
3000 case SCSI_PROT_READ_STRIP:
3001 case SCSI_PROT_WRITE_INSERT:
3002 case SCSI_PROT_READ_PASS:
3003 case SCSI_PROT_WRITE_PASS:
3004 bghm /= (cmd->device->sector_size +
3005 sizeof(struct scsi_dif_tuple));
3006 break;
3007 }
3008
3009 failing_sector = scsi_get_lba(cmd);
3010 failing_sector += bghm;
3011
3012 /* Descriptor Information */
3013 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3014 }
3015
3016 if (!ret) {
3017 /* No error was reported - problem in FW? */
3018 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3019 "9057 BLKGRD: Unknown error in cmd"
3020 " 0x%x lba 0x%llx blk cnt 0x%x "
3021 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3022 (unsigned long long)scsi_get_lba(cmd),
3023 blk_rq_sectors(cmd->request), bgstat, bghm);
3024
3025 		/* Calculate what type of error it was */
3026 lpfc_calc_bg_err(phba, lpfc_cmd);
3027 }
3028 out:
3029 return ret;
3030 }
3031
3032 /**
3033 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3034 * @phba: The Hba for which this call is being executed.
3035 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3036 *
3037 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3038 * field of @lpfc_cmd for device with SLI-4 interface spec.
3039 *
3040 * Return codes:
3041 * 2 - Error - Do not retry
3042 * 1 - Error - Retry
3043 * 0 - Success
3044 **/
3045 static int
3046 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3047 {
3048 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3049 struct scatterlist *sgel = NULL;
3050 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3051 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3052 struct sli4_sge *first_data_sgl;
3053 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3054 dma_addr_t physaddr;
3055 uint32_t num_bde = 0;
3056 uint32_t dma_len;
3057 uint32_t dma_offset = 0;
3058 int nseg, i, j;
3059 struct ulp_bde64 *bde;
3060 bool lsp_just_set = false;
3061 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3062
3063 /*
3064 * There are three possibilities here - use scatter-gather segment, use
3065 * the single mapping, or neither. Start the lpfc command prep by
3066 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3067 * data bde entry.
3068 */
3069 if (scsi_sg_count(scsi_cmnd)) {
3070 /*
3071 * The driver stores the segment count returned from pci_map_sg
3072 * because this is a count of dma-mappings used to map the use_sg
3073 * pages. They are not guaranteed to be the same for those
3074 * architectures that implement an IOMMU.
3075 */
3076
3077 nseg = scsi_dma_map(scsi_cmnd);
3078 if (unlikely(nseg <= 0))
3079 return 1;
3080 sgl += 1;
3081 /* clear the last flag in the fcp_rsp map entry */
3082 sgl->word2 = le32_to_cpu(sgl->word2);
3083 bf_set(lpfc_sli4_sge_last, sgl, 0);
3084 sgl->word2 = cpu_to_le32(sgl->word2);
3085 sgl += 1;
3086 first_data_sgl = sgl;
3087 lpfc_cmd->seg_cnt = nseg;
3088 if (!phba->cfg_xpsgl &&
3089 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3090 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3091 "9074 BLKGRD:"
3092 " %s: Too many sg segments from "
3093 "dma_map_sg. Config %d, seg_cnt %d\n",
3094 __func__, phba->cfg_sg_seg_cnt,
3095 lpfc_cmd->seg_cnt);
3096 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3097 lpfc_cmd->seg_cnt = 0;
3098 scsi_dma_unmap(scsi_cmnd);
3099 return 2;
3100 }
3101
3102 /*
3103 * The driver established a maximum scatter-gather segment count
3104 * during probe that limits the number of sg elements in any
3105 * single scsi command. Just run through the seg_cnt and format
3106 * the sge's.
3107 * When using SLI-3 the driver will try to fit all the BDEs into
3108 * the IOCB. If it can't then the BDEs get added to a BPL as it
3109 * does for SLI-2 mode.
3110 */
3111
3112 /* for tracking segment boundaries */
3113 sgel = scsi_sglist(scsi_cmnd);
3114 j = 2;
3115 for (i = 0; i < nseg; i++) {
3116 sgl->word2 = 0;
3117 if ((num_bde + 1) == nseg) {
3118 bf_set(lpfc_sli4_sge_last, sgl, 1);
3119 bf_set(lpfc_sli4_sge_type, sgl,
3120 LPFC_SGE_TYPE_DATA);
3121 } else {
3122 bf_set(lpfc_sli4_sge_last, sgl, 0);
3123
3124 /* do we need to expand the segment */
3125 if (!lsp_just_set &&
3126 !((j + 1) % phba->border_sge_num) &&
3127 ((nseg - 1) != i)) {
3128 /* set LSP type */
3129 bf_set(lpfc_sli4_sge_type, sgl,
3130 LPFC_SGE_TYPE_LSP);
3131
3132 sgl_xtra = lpfc_get_sgl_per_hdwq(
3133 phba, lpfc_cmd);
3134
3135 if (unlikely(!sgl_xtra)) {
3136 lpfc_cmd->seg_cnt = 0;
3137 scsi_dma_unmap(scsi_cmnd);
3138 return 1;
3139 }
3140 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3141 sgl_xtra->dma_phys_sgl));
3142 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3143 sgl_xtra->dma_phys_sgl));
3144
3145 } else {
3146 bf_set(lpfc_sli4_sge_type, sgl,
3147 LPFC_SGE_TYPE_DATA);
3148 }
3149 }
3150
3151 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3152 LPFC_SGE_TYPE_LSP)) {
3153 if ((nseg - 1) == i)
3154 bf_set(lpfc_sli4_sge_last, sgl, 1);
3155
3156 physaddr = sg_dma_address(sgel);
3157 dma_len = sg_dma_len(sgel);
3158 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3159 physaddr));
3160 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3161 physaddr));
3162
3163 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3164 sgl->word2 = cpu_to_le32(sgl->word2);
3165 sgl->sge_len = cpu_to_le32(dma_len);
3166
3167 dma_offset += dma_len;
3168 sgel = sg_next(sgel);
3169
3170 sgl++;
3171 lsp_just_set = false;
3172
3173 } else {
3174 sgl->word2 = cpu_to_le32(sgl->word2);
3175 sgl->sge_len = cpu_to_le32(
3176 phba->cfg_sg_dma_buf_size);
3177
3178 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3179 i = i - 1;
3180
3181 lsp_just_set = true;
3182 }
3183
3184 j++;
3185 }
3186 /*
3187 * Setup the first Payload BDE. For FCoE we just key off
3188 * Performance Hints, for FC we use lpfc_enable_pbde.
3189 * We populate words 13-15 of IOCB/WQE.
3190 */
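		/*
		 * The PBDE repeats the first data SGE inside the IOCB/WQE
		 * (words 13-15) so the adapter has the first buffer
		 * descriptor immediately; this is the Performance Hints /
		 * PBDE optimization referred to above.
		 */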
3191 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3192 phba->cfg_enable_pbde) {
3193 bde = (struct ulp_bde64 *)
3194 &(iocb_cmd->unsli3.sli3Words[5]);
3195 bde->addrLow = first_data_sgl->addr_lo;
3196 bde->addrHigh = first_data_sgl->addr_hi;
3197 bde->tus.f.bdeSize =
3198 le32_to_cpu(first_data_sgl->sge_len);
3199 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3200 bde->tus.w = cpu_to_le32(bde->tus.w);
3201 }
3202 } else {
3203 sgl += 1;
3204 /* clear the last flag in the fcp_rsp map entry */
3205 sgl->word2 = le32_to_cpu(sgl->word2);
3206 bf_set(lpfc_sli4_sge_last, sgl, 1);
3207 sgl->word2 = cpu_to_le32(sgl->word2);
3208
3209 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3210 phba->cfg_enable_pbde) {
3211 bde = (struct ulp_bde64 *)
3212 &(iocb_cmd->unsli3.sli3Words[5]);
3213 memset(bde, 0, (sizeof(uint32_t) * 3));
3214 }
3215 }
3216
3217 /*
3218 * Finish initializing those IOCB fields that are dependent on the
3219 	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3220 	 * explicitly reinitialized since all iocb memory resources
3221 	 * are reused.
3222 */
3223 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3224
3225 /*
3226 * Due to difference in data length between DIF/non-DIF paths,
3227 * we need to set word 4 of IOCB here
3228 */
3229 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3230
3231 /*
3232 * If the OAS driver feature is enabled and the lun is enabled for
3233 * OAS, set the oas iocb related flags.
3234 */
3235 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3236 scsi_cmnd->device->hostdata)->oas_enabled) {
3237 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3238 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3239 scsi_cmnd->device->hostdata)->priority;
3240 }
3241
3242 return 0;
3243 }
3244
3245 /**
3246 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3247 * @phba: The Hba for which this call is being executed.
3248 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3249 *
3250 * This is the protection/DIF aware version of
3251 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3252 * two functions eventually, but for now, it's here
3253 * Return codes:
3254 * 2 - Error - Do not retry
3255 * 1 - Error - Retry
3256 * 0 - Success
3257 **/
3258 static int
3259 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3260 struct lpfc_io_buf *lpfc_cmd)
3261 {
3262 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3263 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3264 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3265 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3266 uint32_t num_sge = 0;
3267 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3268 int prot_group_type = 0;
3269 int fcpdl;
3270 int ret = 1;
3271 struct lpfc_vport *vport = phba->pport;
3272
3273 /*
3274 	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
3275 	 * and fcp_rsp regions to the first data sge entry
3276 */
3277 if (scsi_sg_count(scsi_cmnd)) {
3278 /*
3279 * The driver stores the segment count returned from pci_map_sg
3280 * because this is a count of dma-mappings used to map the use_sg
3281 * pages. They are not guaranteed to be the same for those
3282 * architectures that implement an IOMMU.
3283 */
3284 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3285 scsi_sglist(scsi_cmnd),
3286 scsi_sg_count(scsi_cmnd), datadir);
3287 if (unlikely(!datasegcnt))
3288 return 1;
3289
3290 sgl += 1;
3291 /* clear the last flag in the fcp_rsp map entry */
3292 sgl->word2 = le32_to_cpu(sgl->word2);
3293 bf_set(lpfc_sli4_sge_last, sgl, 0);
3294 sgl->word2 = cpu_to_le32(sgl->word2);
3295
3296 sgl += 1;
3297 lpfc_cmd->seg_cnt = datasegcnt;
3298
3299 /* First check if data segment count from SCSI Layer is good */
3300 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3301 !phba->cfg_xpsgl) {
3302 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3303 ret = 2;
3304 goto err;
3305 }
3306
3307 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3308
3309 switch (prot_group_type) {
3310 case LPFC_PG_TYPE_NO_DIF:
3311 /* Here we need to add a DISEED to the count */
3312 if (((lpfc_cmd->seg_cnt + 1) >
3313 phba->cfg_total_seg_cnt) &&
3314 !phba->cfg_xpsgl) {
3315 ret = 2;
3316 goto err;
3317 }
3318
3319 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3320 datasegcnt, lpfc_cmd);
3321
3322 /* we should have 2 or more entries in buffer list */
3323 if (num_sge < 2) {
3324 ret = 2;
3325 goto err;
3326 }
3327 break;
3328
3329 case LPFC_PG_TYPE_DIF_BUF:
3330 /*
3331 * This type indicates that protection buffers are
3332 * passed to the driver, so that needs to be prepared
3333 * for DMA
3334 */
3335 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3336 scsi_prot_sglist(scsi_cmnd),
3337 scsi_prot_sg_count(scsi_cmnd), datadir);
3338 if (unlikely(!protsegcnt)) {
3339 scsi_dma_unmap(scsi_cmnd);
3340 return 1;
3341 }
3342
3343 lpfc_cmd->prot_seg_cnt = protsegcnt;
3344 /*
3345 * There is a minimum of 3 SGEs used for every
3346 * protection data segment.
3347 */
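			/*
			 * (A DISEED, a DIF SGE and at least one data SGE per
			 * protection group is where the factor of 3 comes
			 * from.)
			 */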
3348 if (((lpfc_cmd->prot_seg_cnt * 3) >
3349 (phba->cfg_total_seg_cnt - 2)) &&
3350 !phba->cfg_xpsgl) {
3351 ret = 2;
3352 goto err;
3353 }
3354
3355 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3356 datasegcnt, protsegcnt, lpfc_cmd);
3357
3358 /* we should have 3 or more entries in buffer list */
3359 if (num_sge < 3 ||
3360 (num_sge > phba->cfg_total_seg_cnt &&
3361 !phba->cfg_xpsgl)) {
3362 ret = 2;
3363 goto err;
3364 }
3365 break;
3366
3367 case LPFC_PG_TYPE_INVALID:
3368 default:
3369 scsi_dma_unmap(scsi_cmnd);
3370 lpfc_cmd->seg_cnt = 0;
3371
3372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3373 "9083 Unexpected protection group %i\n",
3374 prot_group_type);
3375 return 2;
3376 }
3377 }
3378
3379 switch (scsi_get_prot_op(scsi_cmnd)) {
3380 case SCSI_PROT_WRITE_STRIP:
3381 case SCSI_PROT_READ_STRIP:
3382 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3383 break;
3384 case SCSI_PROT_WRITE_INSERT:
3385 case SCSI_PROT_READ_INSERT:
3386 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3387 break;
3388 case SCSI_PROT_WRITE_PASS:
3389 case SCSI_PROT_READ_PASS:
3390 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3391 break;
3392 }
3393
3394 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3395 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3396
3397 /*
3398 * Due to difference in data length between DIF/non-DIF paths,
3399 * we need to set word 4 of IOCB here
3400 */
3401 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3402
3403 /*
3404 * For First burst, we may need to adjust the initial transfer
3405 * length for DIF
3406 */
3407 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3408 (fcpdl < vport->cfg_first_burst_size))
3409 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3410
3411 /*
3412 * If the OAS driver feature is enabled and the lun is enabled for
3413 * OAS, set the oas iocb related flags.
3414 */
3415 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3416 scsi_cmnd->device->hostdata)->oas_enabled)
3417 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3418
3419 return 0;
3420 err:
3421 if (lpfc_cmd->seg_cnt)
3422 scsi_dma_unmap(scsi_cmnd);
3423 if (lpfc_cmd->prot_seg_cnt)
3424 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3425 scsi_prot_sg_count(scsi_cmnd),
3426 scsi_cmnd->sc_data_direction);
3427
3428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3429 "9084 Cannot setup S/G List for HBA"
3430 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3431 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3432 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3433 prot_group_type, num_sge);
3434
3435 lpfc_cmd->seg_cnt = 0;
3436 lpfc_cmd->prot_seg_cnt = 0;
3437 return ret;
3438 }
3439
3440 /**
3441 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3442 * @phba: The Hba for which this call is being executed.
3443 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3444 *
3445 * This routine wraps the actual DMA mapping function pointer from the
3446 * lpfc_hba struct.
3447 *
3448 * Return codes:
3449 * 1 - Error
3450 * 0 - Success
3451 **/
3452 static inline int
3453 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3454 {
3455 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3456 }
3457
3458 /**
3459 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3460 * using BlockGuard.
3461 * @phba: The Hba for which this call is being executed.
3462 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3463 *
3464 * This routine wraps the actual DMA mapping function pointer from the
3465 * lpfc_hba struct.
3466 *
3467 * Return codes:
3468 * 1 - Error
3469 * 0 - Success
3470 **/
3471 static inline int
3472 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3473 {
3474 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3475 }
3476
3477 /**
3478 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3479 * @phba: Pointer to hba context object.
3480 * @vport: Pointer to vport object.
3481 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3482 * @rsp_iocb: Pointer to response iocb object which reported error.
3483 *
3484 * This function posts an event when there is a SCSI command reporting
3485 * error from the scsi device.
3486 **/
3487 static void
3488 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3489 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3490 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3491 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3492 uint32_t resp_info = fcprsp->rspStatus2;
3493 uint32_t scsi_status = fcprsp->rspStatus3;
3494 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3495 struct lpfc_fast_path_event *fast_path_evt = NULL;
3496 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3497 unsigned long flags;
3498
3499 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3500 return;
3501
3502 /* If there is queuefull or busy condition send a scsi event */
3503 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3504 (cmnd->result == SAM_STAT_BUSY)) {
3505 fast_path_evt = lpfc_alloc_fast_evt(phba);
3506 if (!fast_path_evt)
3507 return;
3508 fast_path_evt->un.scsi_evt.event_type =
3509 FC_REG_SCSI_EVENT;
3510 fast_path_evt->un.scsi_evt.subcategory =
3511 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3512 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3513 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3514 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3515 &pnode->nlp_portname, sizeof(struct lpfc_name));
3516 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3517 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3518 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3519 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3520 fast_path_evt = lpfc_alloc_fast_evt(phba);
3521 if (!fast_path_evt)
3522 return;
3523 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3524 FC_REG_SCSI_EVENT;
3525 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3526 LPFC_EVENT_CHECK_COND;
3527 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3528 cmnd->device->lun;
3529 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3530 &pnode->nlp_portname, sizeof(struct lpfc_name));
3531 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3532 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3533 fast_path_evt->un.check_cond_evt.sense_key =
3534 cmnd->sense_buffer[2] & 0xf;
3535 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3536 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3537 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3538 fcpi_parm &&
3539 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3540 ((scsi_status == SAM_STAT_GOOD) &&
3541 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3542 /*
3543 * If status is good or resid does not match with fcp_param and
3544 * there is valid fcpi_parm, then there is a read_check error
3545 */
3546 fast_path_evt = lpfc_alloc_fast_evt(phba);
3547 if (!fast_path_evt)
3548 return;
3549 fast_path_evt->un.read_check_error.header.event_type =
3550 FC_REG_FABRIC_EVENT;
3551 fast_path_evt->un.read_check_error.header.subcategory =
3552 LPFC_EVENT_FCPRDCHKERR;
3553 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3554 &pnode->nlp_portname, sizeof(struct lpfc_name));
3555 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3556 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3557 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3558 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3559 fast_path_evt->un.read_check_error.fcpiparam =
3560 fcpi_parm;
3561 } else
3562 return;
3563
3564 fast_path_evt->vport = vport;
3565 spin_lock_irqsave(&phba->hbalock, flags);
3566 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3567 spin_unlock_irqrestore(&phba->hbalock, flags);
3568 lpfc_worker_wake_up(phba);
3569 return;
3570 }
3571
3572 /**
3573 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3574 * @phba: The HBA for which this call is being executed.
3575 * @psb: The scsi buffer which is going to be un-mapped.
3576 *
3577  * This routine does DMA un-mapping of the scatter gather list of the scsi
3578  * command held in @psb for a device with the SLI-3 interface spec.
3579 **/
3580 static void
3581 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3582 {
3583 /*
3584 * There are only two special cases to consider. (1) the scsi command
3585 * requested scatter-gather usage or (2) the scsi command allocated
3586 * a request buffer, but did not request use_sg. There is a third
3587 * case, but it does not require resource deallocation.
3588 */
3589 if (psb->seg_cnt > 0)
3590 scsi_dma_unmap(psb->pCmd);
3591 if (psb->prot_seg_cnt > 0)
3592 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3593 scsi_prot_sg_count(psb->pCmd),
3594 psb->pCmd->sc_data_direction);
3595 }
3596
3597 /**
3598  * lpfc_handle_fcp_err - FCP response handler
3599 * @vport: The virtual port for which this call is being executed.
3600 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3601 * @rsp_iocb: The response IOCB which contains FCP error.
3602 *
3603 * This routine is called to process response IOCB with status field
3604 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3605 * based upon SCSI and FCP error.
3606 **/
3607 static void
3608 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3609 struct lpfc_iocbq *rsp_iocb)
3610 {
3611 struct lpfc_hba *phba = vport->phba;
3612 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3613 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3614 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3615 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3616 uint32_t resp_info = fcprsp->rspStatus2;
3617 uint32_t scsi_status = fcprsp->rspStatus3;
3618 uint32_t *lp;
3619 uint32_t host_status = DID_OK;
3620 uint32_t rsplen = 0;
3621 uint32_t fcpDl;
3622 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3623
3624
3625 /*
3626 * If this is a task management command, there is no
3627 * scsi packet associated with this lpfc_cmd. The driver
3628 * consumes it.
3629 */
3630 if (fcpcmd->fcpCntl2) {
3631 scsi_status = 0;
3632 goto out;
3633 }
3634
3635 if (resp_info & RSP_LEN_VALID) {
3636 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3637 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3638 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3639 "2719 Invalid response length: "
3640 "tgt x%x lun x%llx cmnd x%x rsplen "
3641 "x%x\n", cmnd->device->id,
3642 cmnd->device->lun, cmnd->cmnd[0],
3643 rsplen);
3644 host_status = DID_ERROR;
3645 goto out;
3646 }
3647 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3648 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3649 "2757 Protocol failure detected during "
3650 "processing of FCP I/O op: "
3651 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3652 cmnd->device->id,
3653 cmnd->device->lun, cmnd->cmnd[0],
3654 fcprsp->rspInfo3);
3655 host_status = DID_ERROR;
3656 goto out;
3657 }
3658 }
3659
3660 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3661 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3662 if (snslen > SCSI_SENSE_BUFFERSIZE)
3663 snslen = SCSI_SENSE_BUFFERSIZE;
3664
3665 if (resp_info & RSP_LEN_VALID)
3666 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3667 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3668 }
3669 lp = (uint32_t *)cmnd->sense_buffer;
3670
3671 /* special handling for under run conditions */
3672 if (!scsi_status && (resp_info & RESID_UNDER)) {
3673 		/* don't log underruns at normal LOG_FCP verbosity... */
3674 if (vport->cfg_log_verbose & LOG_FCP)
3675 logit = LOG_FCP_ERROR;
3676 		/* ...unless the operator explicitly enabled LOG_FCP_UNDER */
3677 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3678 logit = LOG_FCP_UNDER;
3679 }
3680
3681 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3682 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3683 "Data: x%x x%x x%x x%x x%x\n",
3684 cmnd->cmnd[0], scsi_status,
3685 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3686 be32_to_cpu(fcprsp->rspResId),
3687 be32_to_cpu(fcprsp->rspSnsLen),
3688 be32_to_cpu(fcprsp->rspRspLen),
3689 fcprsp->rspInfo3);
3690
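	/* Start from a clean residual count; the RESID_UNDER/RESID_OVER
	 * handling below overrides it when the response reports one.
	 */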
3691 scsi_set_resid(cmnd, 0);
3692 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3693 if (resp_info & RESID_UNDER) {
3694 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3695
3696 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3697 "9025 FCP Underrun, expected %d, "
3698 "residual %d Data: x%x x%x x%x\n",
3699 fcpDl,
3700 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3701 cmnd->underflow);
3702
3703 /*
3704 		 * If there is an underrun, check whether the underrun reported
3705 		 * by the storage array matches the underrun reported by the HBA.
3706 		 * If they do not match, a frame was dropped.
3707 */
3708 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3709 lpfc_printf_vlog(vport, KERN_WARNING,
3710 LOG_FCP | LOG_FCP_ERROR,
3711 "9026 FCP Read Check Error "
3712 "and Underrun Data: x%x x%x x%x x%x\n",
3713 fcpDl,
3714 scsi_get_resid(cmnd), fcpi_parm,
3715 cmnd->cmnd[0]);
3716 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3717 host_status = DID_ERROR;
3718 }
3719 /*
3720 * The cmnd->underflow is the minimum number of bytes that must
3721 * be transferred for this command. Provided a sense condition
3722 * is not present, make sure the actual amount transferred is at
3723 * least the underflow value or fail.
3724 */
3725 if (!(resp_info & SNS_LEN_VALID) &&
3726 (scsi_status == SAM_STAT_GOOD) &&
3727 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3728 < cmnd->underflow)) {
3729 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3730 "9027 FCP command x%x residual "
3731 "underrun converted to error "
3732 "Data: x%x x%x x%x\n",
3733 cmnd->cmnd[0], scsi_bufflen(cmnd),
3734 scsi_get_resid(cmnd), cmnd->underflow);
3735 host_status = DID_ERROR;
3736 }
3737 } else if (resp_info & RESID_OVER) {
3738 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3739 "9028 FCP command x%x residual overrun error. "
3740 "Data: x%x x%x\n", cmnd->cmnd[0],
3741 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3742 host_status = DID_ERROR;
3743
3744 /*
3745 * Check SLI validation that all the transfer was actually done
3746 * (fcpi_parm should be zero). Apply check only to reads.
3747 */
3748 } else if (fcpi_parm) {
3749 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3750 "9029 FCP %s Check Error xri x%x Data: "
3751 "x%x x%x x%x x%x x%x\n",
3752 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3753 "Read" : "Write"),
3754 ((phba->sli_rev == LPFC_SLI_REV4) ?
3755 lpfc_cmd->cur_iocbq.sli4_xritag :
3756 rsp_iocb->iocb.ulpContext),
3757 fcpDl, be32_to_cpu(fcprsp->rspResId),
3758 fcpi_parm, cmnd->cmnd[0], scsi_status);
3759
3760 /* There is some issue with the LPe12000 that causes it
3761 * to miscalculate the fcpi_parm and falsely trip this
3762 * recovery logic. Detect this case and don't error when true.
3763 */
3764 if (fcpi_parm > fcpDl)
3765 goto out;
3766
3767 switch (scsi_status) {
3768 case SAM_STAT_GOOD:
3769 case SAM_STAT_CHECK_CONDITION:
3770 /* Fabric dropped a data frame. Fail any successful
3771 * command in which we detected dropped frames.
3772 * A status of good or some check conditions could
3773 * be considered a successful command.
3774 */
3775 host_status = DID_ERROR;
3776 break;
3777 }
3778 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3779 }
3780
3781 out:
3782 cmnd->result = host_status << 16 | scsi_status;
3783 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3784 }
3785
3786 /**
3787 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3788 * @phba: The Hba for which this call is being executed.
3789 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3790 * @pIocbOut: The response IOCBQ for the scsi cmnd.
3791 *
3792 * This routine assigns scsi command result by looking into response IOCB
3793 * status field appropriately. This routine handles QUEUE FULL condition as
3794 * well by ramping down device queue depth.
3795 **/
3796 static void
3797 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3798 struct lpfc_iocbq *pIocbOut)
3799 {
3800 struct lpfc_io_buf *lpfc_cmd =
3801 (struct lpfc_io_buf *) pIocbIn->context1;
3802 struct lpfc_vport *vport = pIocbIn->vport;
3803 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3804 struct lpfc_nodelist *pnode = rdata->pnode;
3805 struct scsi_cmnd *cmd;
3806 unsigned long flags;
3807 struct lpfc_fast_path_event *fast_path_evt;
3808 struct Scsi_Host *shost;
3809 int idx;
3810 uint32_t logit = LOG_FCP;
3811
3812 /* Guard against abort handler being called at same time */
3813 spin_lock(&lpfc_cmd->buf_lock);
3814
3815 /* Sanity check on return of outstanding command */
3816 cmd = lpfc_cmd->pCmd;
3817 if (!cmd || !phba) {
3818 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3819 "2621 IO completion: Not an active IO\n");
3820 spin_unlock(&lpfc_cmd->buf_lock);
3821 return;
3822 }
3823
3824 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
3825 if (phba->sli4_hba.hdwq)
3826 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
3827
3828 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3829 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
3830 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
3831 #endif
3832 shost = cmd->device->host;
3833
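	/* ulpStatus carries the IOCB completion status; when the adapter
	 * reports a reject, the low-order bits of ulpWord[4] hold the
	 * IOERR reason code, which IOERR_PARAM_MASK extracts.
	 */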
3834 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3835 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3836 	/* pick up SLI4 exchange busy status from HBA */
3837 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
3838 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
3839 else
3840 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
3841
3842 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3843 if (lpfc_cmd->prot_data_type) {
3844 struct scsi_dif_tuple *src = NULL;
3845
3846 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3847 /*
3848 * Used to restore any changes to protection
3849 * data for error injection.
3850 */
3851 switch (lpfc_cmd->prot_data_type) {
3852 case LPFC_INJERR_REFTAG:
3853 src->ref_tag =
3854 lpfc_cmd->prot_data;
3855 break;
3856 case LPFC_INJERR_APPTAG:
3857 src->app_tag =
3858 (uint16_t)lpfc_cmd->prot_data;
3859 break;
3860 case LPFC_INJERR_GUARD:
3861 src->guard_tag =
3862 (uint16_t)lpfc_cmd->prot_data;
3863 break;
3864 default:
3865 break;
3866 }
3867
3868 lpfc_cmd->prot_data = 0;
3869 lpfc_cmd->prot_data_type = 0;
3870 lpfc_cmd->prot_data_segment = NULL;
3871 }
3872 #endif
3873
3874 if (unlikely(lpfc_cmd->status)) {
3875 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3876 (lpfc_cmd->result & IOERR_DRVR_MASK))
3877 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3878 else if (lpfc_cmd->status >= IOSTAT_CNT)
3879 lpfc_cmd->status = IOSTAT_DEFAULT;
3880 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3881 !lpfc_cmd->fcp_rsp->rspStatus3 &&
3882 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3883 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3884 logit = 0;
3885 else
3886 logit = LOG_FCP | LOG_FCP_UNDER;
3887 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3888 "9030 FCP cmd x%x failed <%d/%lld> "
3889 "status: x%x result: x%x "
3890 "sid: x%x did: x%x oxid: x%x "
3891 "Data: x%x x%x\n",
3892 cmd->cmnd[0],
3893 cmd->device ? cmd->device->id : 0xffff,
3894 cmd->device ? cmd->device->lun : 0xffff,
3895 lpfc_cmd->status, lpfc_cmd->result,
3896 vport->fc_myDID,
3897 (pnode) ? pnode->nlp_DID : 0,
3898 phba->sli_rev == LPFC_SLI_REV4 ?
3899 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3900 pIocbOut->iocb.ulpContext,
3901 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3902
3903 switch (lpfc_cmd->status) {
3904 case IOSTAT_FCP_RSP_ERROR:
3905 /* Call FCP RSP handler to determine result */
3906 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3907 break;
3908 case IOSTAT_NPORT_BSY:
3909 case IOSTAT_FABRIC_BSY:
3910 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
3911 fast_path_evt = lpfc_alloc_fast_evt(phba);
3912 if (!fast_path_evt)
3913 break;
3914 fast_path_evt->un.fabric_evt.event_type =
3915 FC_REG_FABRIC_EVENT;
3916 fast_path_evt->un.fabric_evt.subcategory =
3917 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3918 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3919 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3920 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3921 &pnode->nlp_portname,
3922 sizeof(struct lpfc_name));
3923 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3924 &pnode->nlp_nodename,
3925 sizeof(struct lpfc_name));
3926 }
3927 fast_path_evt->vport = vport;
3928 fast_path_evt->work_evt.evt =
3929 LPFC_EVT_FASTPATH_MGMT_EVT;
3930 spin_lock_irqsave(&phba->hbalock, flags);
3931 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3932 &phba->work_list);
3933 spin_unlock_irqrestore(&phba->hbalock, flags);
3934 lpfc_worker_wake_up(phba);
3935 break;
3936 case IOSTAT_LOCAL_REJECT:
3937 case IOSTAT_REMOTE_STOP:
3938 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3939 lpfc_cmd->result ==
3940 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3941 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3942 lpfc_cmd->result ==
3943 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3944 cmd->result = DID_NO_CONNECT << 16;
3945 break;
3946 }
3947 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3948 lpfc_cmd->result == IOERR_NO_RESOURCES ||
3949 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3950 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3951 cmd->result = DID_REQUEUE << 16;
3952 break;
3953 }
3954 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3955 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3956 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3957 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3958 /*
3959 * This is a response for a BG enabled
3960 * cmd. Parse BG error
3961 */
3962 lpfc_parse_bg_err(phba, lpfc_cmd,
3963 pIocbOut);
3964 break;
3965 } else {
3966 lpfc_printf_vlog(vport, KERN_WARNING,
3967 LOG_BG,
3968 "9031 non-zero BGSTAT "
3969 "on unprotected cmd\n");
3970 }
3971 }
3972 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3973 && (phba->sli_rev == LPFC_SLI_REV4)
3974 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3975 			/* This IO was aborted by the target; we don't
3976 			 * know the rxid, and because we did not send the
3977 			 * ABTS we cannot generate an RRQ.
3978 			 */
3979 lpfc_set_rrq_active(phba, pnode,
3980 lpfc_cmd->cur_iocbq.sli4_lxritag,
3981 0, 0);
3982 }
3983 fallthrough;
3984 default:
3985 cmd->result = DID_ERROR << 16;
3986 break;
3987 }
3988
3989 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3990 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3991 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
3992 SAM_STAT_BUSY;
3993 } else
3994 cmd->result = DID_OK << 16;
3995
3996 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3997 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3998
3999 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4000 "0710 Iodone <%d/%llu> cmd x%px, error "
4001 "x%x SNS x%x x%x Data: x%x x%x\n",
4002 cmd->device->id, cmd->device->lun, cmd,
4003 cmd->result, *lp, *(lp + 3), cmd->retries,
4004 scsi_get_resid(cmd));
4005 }
4006
4007 lpfc_update_stats(vport, lpfc_cmd);
4008 if (vport->cfg_max_scsicmpl_time &&
4009 time_after(jiffies, lpfc_cmd->start_time +
4010 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4011 spin_lock_irqsave(shost->host_lock, flags);
4012 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4013 if (pnode->cmd_qdepth >
4014 atomic_read(&pnode->cmd_pending) &&
4015 (atomic_read(&pnode->cmd_pending) >
4016 LPFC_MIN_TGT_QDEPTH) &&
4017 ((cmd->cmnd[0] == READ_10) ||
4018 (cmd->cmnd[0] == WRITE_10)))
4019 pnode->cmd_qdepth =
4020 atomic_read(&pnode->cmd_pending);
4021
4022 pnode->last_change_time = jiffies;
4023 }
4024 spin_unlock_irqrestore(shost->host_lock, flags);
4025 }
4026 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4027
4028 lpfc_cmd->pCmd = NULL;
4029 spin_unlock(&lpfc_cmd->buf_lock);
4030
4031 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4032 if (lpfc_cmd->ts_cmd_start) {
4033 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4034 lpfc_cmd->ts_data_io = ktime_get_ns();
4035 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4036 lpfc_io_ktime(phba, lpfc_cmd);
4037 }
4038 #endif
4039 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4040 cmd->scsi_done(cmd);
4041
4042 /*
4043 * If there is an abort thread waiting for command completion
4044 * wake up the thread.
4045 */
4046 spin_lock(&lpfc_cmd->buf_lock);
4047 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4048 if (lpfc_cmd->waitq)
4049 wake_up(lpfc_cmd->waitq);
4050 spin_unlock(&lpfc_cmd->buf_lock);
4051
4052 lpfc_release_scsi_buf(phba, lpfc_cmd);
4053 }
4054
4055 /**
4056 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4057 * @data: A pointer to the immediate command data portion of the IOCB.
4058 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4059 *
4060 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4061 * byte swapping the data to big endian format for transmission on the wire.
4062 **/
4063 static void
4064 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4065 {
4066 int i, j;
4067 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4068 i += sizeof(uint32_t), j++) {
4069 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4070 }
4071 }
4072
4073 /**
4074  * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
4075 * @vport: The virtual port for which this call is being executed.
4076 * @lpfc_cmd: The scsi command which needs to send.
4077 * @pnode: Pointer to lpfc_nodelist.
4078 *
4079  * This routine initializes the fcp_cmnd and iocb data structures from the scsi
4080  * command for transfer to a device with the SLI-3 interface spec.
4081 **/
4082 static void
4083 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4084 struct lpfc_nodelist *pnode)
4085 {
4086 struct lpfc_hba *phba = vport->phba;
4087 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4088 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4089 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4090 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4091 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4092 int datadir = scsi_cmnd->sc_data_direction;
4093 int idx;
4094 uint8_t *ptr;
4095 bool sli4;
4096 uint32_t fcpdl;
4097
4098 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4099 return;
4100
4101 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4102 /* clear task management bits */
4103 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4104
4105 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4106 &lpfc_cmd->fcp_cmnd->fcp_lun);
4107
4108 ptr = &fcp_cmnd->fcpCdb[0];
4109 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4110 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4111 ptr += scsi_cmnd->cmd_len;
4112 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4113 }
4114
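	/* All FCP commands are issued with the SIMPLE task attribute. */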
4115 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4116
4117 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4118 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4119 idx = lpfc_cmd->hdwq_no;
4120 if (phba->sli4_hba.hdwq)
4121 hdwq = &phba->sli4_hba.hdwq[idx];
4122
4123 /*
4124 * There are three possibilities here - use scatter-gather segment, use
4125 * the single mapping, or neither. Start the lpfc command prep by
4126 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4127 * data bde entry.
4128 */
4129 if (scsi_sg_count(scsi_cmnd)) {
4130 if (datadir == DMA_TO_DEVICE) {
4131 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4132 iocb_cmd->ulpPU = PARM_READ_CHECK;
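			/* First burst: when the rport negotiated it, preset
			 * fcpi_XRdy so up to cfg_first_burst_size bytes of
			 * write data may be sent without an XFER_RDY.
			 */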
4133 if (vport->cfg_first_burst_size &&
4134 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4135 fcpdl = scsi_bufflen(scsi_cmnd);
4136 if (fcpdl < vport->cfg_first_burst_size)
4137 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4138 else
4139 piocbq->iocb.un.fcpi.fcpi_XRdy =
4140 vport->cfg_first_burst_size;
4141 }
4142 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4143 if (hdwq)
4144 hdwq->scsi_cstat.output_requests++;
4145 } else {
4146 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4147 iocb_cmd->ulpPU = PARM_READ_CHECK;
4148 fcp_cmnd->fcpCntl3 = READ_DATA;
4149 if (hdwq)
4150 hdwq->scsi_cstat.input_requests++;
4151 }
4152 } else {
4153 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4154 iocb_cmd->un.fcpi.fcpi_parm = 0;
4155 iocb_cmd->ulpPU = 0;
4156 fcp_cmnd->fcpCntl3 = 0;
4157 if (hdwq)
4158 hdwq->scsi_cstat.control_requests++;
4159 }
4160 if (phba->sli_rev == 3 &&
4161 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4162 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4163 /*
4164 * Finish initializing those IOCB fields that are independent
4165 * of the scsi_cmnd request_buffer
4166 */
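	/* For SLI-4 the context must be the adapter-assigned RPI, so the
	 * logical nlp_rpi is translated through the rpi_ids[] table below.
	 */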
4167 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4168 if (sli4)
4169 piocbq->iocb.ulpContext =
4170 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4171 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4172 piocbq->iocb.ulpFCP2Rcvy = 1;
4173 else
4174 piocbq->iocb.ulpFCP2Rcvy = 0;
4175
4176 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4177 piocbq->context1 = lpfc_cmd;
4178 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4179 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4180 piocbq->vport = vport;
4181 }
4182
4183 /**
4184 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4185 * @vport: The virtual port for which this call is being executed.
4186 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4187 * @lun: Logical unit number.
4188 * @task_mgmt_cmd: SCSI task management command.
4189 *
4190 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4191 * for device with SLI-3 interface spec.
4192 *
4193 * Return codes:
4194 * 0 - Error
4195 * 1 - Success
4196 **/
4197 static int
4198 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4199 struct lpfc_io_buf *lpfc_cmd,
4200 uint64_t lun,
4201 uint8_t task_mgmt_cmd)
4202 {
4203 struct lpfc_iocbq *piocbq;
4204 IOCB_t *piocb;
4205 struct fcp_cmnd *fcp_cmnd;
4206 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4207 struct lpfc_nodelist *ndlp = rdata->pnode;
4208
4209 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4210 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4211 return 0;
4212
4213 piocbq = &(lpfc_cmd->cur_iocbq);
4214 piocbq->vport = vport;
4215
4216 piocb = &piocbq->iocb;
4217
4218 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4219 /* Clear out any old data in the FCP command area */
4220 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4221 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4222 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4223 if (vport->phba->sli_rev == 3 &&
4224 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4225 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4226 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4227 piocb->ulpContext = ndlp->nlp_rpi;
4228 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4229 piocb->ulpContext =
4230 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4231 }
4232 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4233 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4234 piocb->ulpPU = 0;
4235 piocb->un.fcpi.fcpi_parm = 0;
4236
4237 /* ulpTimeout is only one byte */
4238 if (lpfc_cmd->timeout > 0xff) {
4239 /*
4240 * Do not timeout the command at the firmware level.
4241 * The driver will provide the timeout mechanism.
4242 */
4243 piocb->ulpTimeout = 0;
4244 } else
4245 piocb->ulpTimeout = lpfc_cmd->timeout;
4246
4247 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4248 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4249
4250 return 1;
4251 }
4252
4253 /**
4254 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4255 * @phba: The hba struct for which this call is being executed.
4256 * @dev_grp: The HBA PCI-Device group number.
4257 *
4258 * This routine sets up the SCSI interface API function jump table in @phba
4259 * struct.
4260 * Returns: 0 - success, -ENODEV - failure.
4261 **/
4262 int
4263 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4264 {
4265
4266 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4267 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4268
4269 switch (dev_grp) {
4270 case LPFC_PCI_DEV_LP:
4271 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4272 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4273 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4274 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4275 break;
4276 case LPFC_PCI_DEV_OC:
4277 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4278 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4279 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4280 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4281 break;
4282 default:
4283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4284 "1418 Invalid HBA PCI-device group: 0x%x\n",
4285 dev_grp);
4286 return -ENODEV;
4287 break;
4288 }
4289 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4290 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4291 return 0;
4292 }
4293
4294 /**
4295  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4296 * @phba: The Hba for which this call is being executed.
4297 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4298 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4299 *
4300  * This routine is the IOCB completion routine for the device reset and target
4301  * reset routines. It releases the scsi buffer associated with lpfc_cmd.
4302 **/
4303 static void
4304 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4305 struct lpfc_iocbq *cmdiocbq,
4306 struct lpfc_iocbq *rspiocbq)
4307 {
4308 struct lpfc_io_buf *lpfc_cmd =
4309 (struct lpfc_io_buf *) cmdiocbq->context1;
4310 if (lpfc_cmd)
4311 lpfc_release_scsi_buf(phba, lpfc_cmd);
4312 return;
4313 }
4314
4315 /**
4316 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4317 * if issuing a pci_bus_reset is possibly unsafe
4318 * @phba: lpfc_hba pointer.
4319 *
4320 * Description:
4321  * Walks the bus_list to ensure that only PCI devices with the Emulex
4322  * vendor ID and device IDs that support hot reset are present, and that
4323  * there is only one occurrence of function 0.
4324 *
4325 * Returns:
4326 * -EBADSLT, detected invalid device
4327 * 0, successful
4328 */
4329 int
4330 lpfc_check_pci_resettable(struct lpfc_hba *phba)
4331 {
4332 const struct pci_dev *pdev = phba->pcidev;
4333 struct pci_dev *ptr = NULL;
4334 u8 counter = 0;
4335
4336 /* Walk the list of devices on the pci_dev's bus */
4337 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4338 /* Check for Emulex Vendor ID */
4339 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4340 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4341 "8346 Non-Emulex vendor found: "
4342 "0x%04x\n", ptr->vendor);
4343 return -EBADSLT;
4344 }
4345
4346 /* Check for valid Emulex Device ID */
4347 switch (ptr->device) {
4348 case PCI_DEVICE_ID_LANCER_FC:
4349 case PCI_DEVICE_ID_LANCER_G6_FC:
4350 case PCI_DEVICE_ID_LANCER_G7_FC:
4351 break;
4352 default:
4353 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4354 "8347 Invalid device found: "
4355 "0x%04x\n", ptr->device);
4356 return -EBADSLT;
4357 }
4358
4359 /* Check for only one function 0 ID to ensure only one HBA on
4360 * secondary bus
4361 */
4362 if (ptr->devfn == 0) {
4363 if (++counter > 1) {
4364 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4365 "8348 More than one device on "
4366 "secondary bus found\n");
4367 return -EBADSLT;
4368 }
4369 }
4370 }
4371
4372 return 0;
4373 }
4374
4375 /**
4376 * lpfc_info - Info entry point of scsi_host_template data structure
4377 * @host: The scsi host for which this call is being executed.
4378 *
4379 * This routine provides module information about hba.
4380 *
4381  * Return code:
4382 * Pointer to char - Success.
4383 **/
4384 const char *
4385 lpfc_info(struct Scsi_Host *host)
4386 {
4387 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4388 struct lpfc_hba *phba = vport->phba;
4389 int link_speed = 0;
4390 static char lpfcinfobuf[384];
4391 char tmp[384] = {0};
4392
4393 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4394 if (phba && phba->pcidev){
4395 /* Model Description */
4396 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4397 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4398 sizeof(lpfcinfobuf))
4399 goto buffer_done;
4400
4401 /* PCI Info */
4402 scnprintf(tmp, sizeof(tmp),
4403 " on PCI bus %02x device %02x irq %d",
4404 phba->pcidev->bus->number, phba->pcidev->devfn,
4405 phba->pcidev->irq);
4406 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4407 sizeof(lpfcinfobuf))
4408 goto buffer_done;
4409
4410 /* Port Number */
4411 if (phba->Port[0]) {
4412 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4413 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4414 sizeof(lpfcinfobuf))
4415 goto buffer_done;
4416 }
4417
4418 /* Link Speed */
4419 link_speed = lpfc_sli_port_speed_get(phba);
4420 if (link_speed != 0) {
4421 scnprintf(tmp, sizeof(tmp),
4422 " Logical Link Speed: %d Mbps", link_speed);
4423 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4424 sizeof(lpfcinfobuf))
4425 goto buffer_done;
4426 }
4427
4428 /* PCI resettable */
4429 if (!lpfc_check_pci_resettable(phba)) {
4430 scnprintf(tmp, sizeof(tmp), " PCI resettable");
4431 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4432 }
4433 }
4434
4435 buffer_done:
4436 return lpfcinfobuf;
4437 }
4438
4439 /**
4440  * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
4441 * @phba: The Hba for which this call is being executed.
4442 *
4443  * This routine re-arms the fcp_poll_timer of @phba to expire cfg_poll_tmo
4444  * milliseconds from now. The default value of cfg_poll_tmo is 10 milliseconds.
4445 **/
4446 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4447 {
4448 unsigned long poll_tmo_expires =
4449 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4450
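	/* Re-arm only while FCP commands are still outstanding on the ring;
	 * otherwise the poll timer is left to lapse.
	 */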
4451 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4452 mod_timer(&phba->fcp_poll_timer,
4453 poll_tmo_expires);
4454 }
4455
4456 /**
4457 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4458 * @phba: The Hba for which this call is being executed.
4459 *
4460 * This routine starts the fcp_poll_timer of @phba.
4461 **/
4462 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4463 {
4464 lpfc_poll_rearm_timer(phba);
4465 }
4466
4467 /**
4468 * lpfc_poll_timeout - Restart polling timer
4469  * @t: Pointer to the timer_list embedded in the lpfc_hba structure.
4470 *
4471  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4472  * and the FCP ring interrupt is disabled.
4473 **/
4474
4475 void lpfc_poll_timeout(struct timer_list *t)
4476 {
4477 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4478
4479 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4480 lpfc_sli_handle_fast_ring_event(phba,
4481 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4482
4483 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4484 lpfc_poll_rearm_timer(phba);
4485 }
4486 }
4487
4488 /**
4489 * lpfc_queuecommand - scsi_host_template queuecommand entry point
4490  * @shost: Pointer to the Scsi_Host to which the command is submitted.
4491  * @cmnd: Pointer to scsi_cmnd data structure.
4492 *
4493  * The driver registers this routine with the scsi midlayer to submit a @cmnd.
4494  * This routine prepares an IOCB from the scsi command and provides it to the
4495  * firmware. The midlayer's done callback is invoked when processing completes.
4496 *
4497 * Return value :
4498 * 0 - Success
4499 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4500 **/
4501 static int
4502 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4503 {
4504 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4505 struct lpfc_hba *phba = vport->phba;
4506 struct lpfc_rport_data *rdata;
4507 struct lpfc_nodelist *ndlp;
4508 struct lpfc_io_buf *lpfc_cmd;
4509 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4510 int err, idx;
4511 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4512 uint64_t start = 0L;
4513
4514 if (phba->ktime_on)
4515 start = ktime_get_ns();
4516 #endif
4517
4518 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4519
4520 /* sanity check on references */
4521 if (unlikely(!rdata) || unlikely(!rport))
4522 goto out_fail_command;
4523
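	/* Let the FC transport veto the command: a nonzero return is the
	 * SCSI result to fail the command with (e.g. rport blocked or gone).
	 */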
4524 err = fc_remote_port_chkready(rport);
4525 if (err) {
4526 cmnd->result = err;
4527 goto out_fail_command;
4528 }
4529 ndlp = rdata->pnode;
4530
4531 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4532 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4533
4534 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4535 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4536 " op:%02x str=%s without registering for"
4537 " BlockGuard - Rejecting command\n",
4538 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4539 dif_op_str[scsi_get_prot_op(cmnd)]);
4540 goto out_fail_command;
4541 }
4542
4543 /*
4544 * Catch race where our node has transitioned, but the
4545 * transport is still transitioning.
4546 */
4547 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4548 goto out_tgt_busy;
4549 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
4550 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
4551 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4552 "3377 Target Queue Full, scsi Id:%d "
4553 "Qdepth:%d Pending command:%d"
4554 " WWNN:%02x:%02x:%02x:%02x:"
4555 "%02x:%02x:%02x:%02x, "
4556 " WWPN:%02x:%02x:%02x:%02x:"
4557 "%02x:%02x:%02x:%02x",
4558 ndlp->nlp_sid, ndlp->cmd_qdepth,
4559 atomic_read(&ndlp->cmd_pending),
4560 ndlp->nlp_nodename.u.wwn[0],
4561 ndlp->nlp_nodename.u.wwn[1],
4562 ndlp->nlp_nodename.u.wwn[2],
4563 ndlp->nlp_nodename.u.wwn[3],
4564 ndlp->nlp_nodename.u.wwn[4],
4565 ndlp->nlp_nodename.u.wwn[5],
4566 ndlp->nlp_nodename.u.wwn[6],
4567 ndlp->nlp_nodename.u.wwn[7],
4568 ndlp->nlp_portname.u.wwn[0],
4569 ndlp->nlp_portname.u.wwn[1],
4570 ndlp->nlp_portname.u.wwn[2],
4571 ndlp->nlp_portname.u.wwn[3],
4572 ndlp->nlp_portname.u.wwn[4],
4573 ndlp->nlp_portname.u.wwn[5],
4574 ndlp->nlp_portname.u.wwn[6],
4575 ndlp->nlp_portname.u.wwn[7]);
4576 goto out_tgt_busy;
4577 }
4578 }
4579
4580 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
4581 if (lpfc_cmd == NULL) {
4582 lpfc_rampdown_queue_depth(phba);
4583
4584 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4585 "0707 driver's buffer pool is empty, "
4586 "IO busied\n");
4587 goto out_host_busy;
4588 }
4589
4590 /*
4591 * Store the midlayer's command structure for the completion phase
4592 * and complete the command initialization.
4593 */
4594 lpfc_cmd->pCmd = cmnd;
4595 lpfc_cmd->rdata = rdata;
4596 lpfc_cmd->ndlp = ndlp;
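	/* Stash the driver's IO buffer in host_scribble so the EH paths
	 * (e.g. lpfc_abort_handler) can recover it from the scsi_cmnd.
	 */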
4597 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4598
4599 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4600 if (vport->phba->cfg_enable_bg) {
4601 lpfc_printf_vlog(vport,
4602 KERN_INFO, LOG_SCSI_CMD,
4603 "9033 BLKGRD: rcvd %s cmd:x%x "
4604 "sector x%llx cnt %u pt %x\n",
4605 dif_op_str[scsi_get_prot_op(cmnd)],
4606 cmnd->cmnd[0],
4607 (unsigned long long)scsi_get_lba(cmnd),
4608 blk_rq_sectors(cmnd->request),
4609 (cmnd->cmnd[1]>>5));
4610 }
4611 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4612 } else {
4613 if (vport->phba->cfg_enable_bg) {
4614 lpfc_printf_vlog(vport,
4615 KERN_INFO, LOG_SCSI_CMD,
4616 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4617 "x%x sector x%llx cnt %u pt %x\n",
4618 cmnd->cmnd[0],
4619 (unsigned long long)scsi_get_lba(cmnd),
4620 blk_rq_sectors(cmnd->request),
4621 (cmnd->cmnd[1]>>5));
4622 }
4623 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4624 }
4625
4626 if (unlikely(err)) {
4627 if (err == 2) {
4628 cmnd->result = DID_ERROR << 16;
4629 goto out_fail_command_release_buf;
4630 }
4631 goto out_host_busy_free_buf;
4632 }
4633
4634 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4635
4636 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4637 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4638 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
4639 #endif
4640 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4641 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4642 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4643 if (start) {
4644 lpfc_cmd->ts_cmd_start = start;
4645 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
4646 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
4647 } else {
4648 lpfc_cmd->ts_cmd_start = 0;
4649 }
4650 #endif
4651 if (err) {
4652 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4653 "3376 FCP could not issue IOCB err %x"
4654 "FCP cmd x%x <%d/%llu> "
4655 "sid: x%x did: x%x oxid: x%x "
4656 "Data: x%x x%x x%x x%x\n",
4657 err, cmnd->cmnd[0],
4658 cmnd->device ? cmnd->device->id : 0xffff,
4659 cmnd->device ? cmnd->device->lun : (u64) -1,
4660 vport->fc_myDID, ndlp->nlp_DID,
4661 phba->sli_rev == LPFC_SLI_REV4 ?
4662 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4663 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4664 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4665 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4666 (uint32_t)
4667 (cmnd->request->timeout / 1000));
4668
4669 goto out_host_busy_free_buf;
4670 }
4671 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4672 lpfc_sli_handle_fast_ring_event(phba,
4673 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4674
4675 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4676 lpfc_poll_rearm_timer(phba);
4677 }
4678
4679 if (phba->cfg_xri_rebalancing)
4680 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
4681
4682 return 0;
4683
4684 out_host_busy_free_buf:
4685 idx = lpfc_cmd->hdwq_no;
4686 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4687 if (phba->sli4_hba.hdwq) {
4688 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
4689 case WRITE_DATA:
4690 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
4691 break;
4692 case READ_DATA:
4693 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
4694 break;
4695 default:
4696 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
4697 }
4698 }
4699 lpfc_release_scsi_buf(phba, lpfc_cmd);
4700 out_host_busy:
4701 return SCSI_MLQUEUE_HOST_BUSY;
4702
4703 out_tgt_busy:
4704 return SCSI_MLQUEUE_TARGET_BUSY;
4705
4706 out_fail_command_release_buf:
4707 lpfc_release_scsi_buf(phba, lpfc_cmd);
4708
4709 out_fail_command:
4710 cmnd->scsi_done(cmnd);
4711 return 0;
4712 }
4713
4714
4715 /**
4716 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4717 * @cmnd: Pointer to scsi_cmnd data structure.
4718 *
4719 * This routine aborts @cmnd pending in base driver.
4720 *
4721 * Return code :
4722 * 0x2003 - Error
4723 * 0x2002 - Success
4724 **/
4725 static int
4726 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4727 {
4728 struct Scsi_Host *shost = cmnd->device->host;
4729 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4730 struct lpfc_hba *phba = vport->phba;
4731 struct lpfc_iocbq *iocb;
4732 struct lpfc_iocbq *abtsiocb;
4733 struct lpfc_io_buf *lpfc_cmd;
4734 IOCB_t *cmd, *icmd;
4735 int ret = SUCCESS, status = 0;
4736 struct lpfc_sli_ring *pring_s4 = NULL;
4737 int ret_val;
4738 unsigned long flags;
4739 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4740
4741 status = fc_block_scsi_eh(cmnd);
4742 if (status != 0 && status != SUCCESS)
4743 return status;
4744
4745 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4746 if (!lpfc_cmd)
4747 return ret;
4748
4749 spin_lock_irqsave(&phba->hbalock, flags);
4750 /* driver queued commands are in process of being flushed */
4751 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4752 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4753 "3168 SCSI Layer abort requested I/O has been "
4754 "flushed by LLD.\n");
4755 ret = FAILED;
4756 goto out_unlock;
4757 }
4758
4759 /* Guard against IO completion being called at same time */
4760 spin_lock(&lpfc_cmd->buf_lock);
4761
4762 if (!lpfc_cmd->pCmd) {
4763 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4764 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4765 "x%x ID %d LUN %llu\n",
4766 SUCCESS, cmnd->device->id, cmnd->device->lun);
4767 goto out_unlock_buf;
4768 }
4769
4770 iocb = &lpfc_cmd->cur_iocbq;
4771 if (phba->sli_rev == LPFC_SLI_REV4) {
4772 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4773 if (!pring_s4) {
4774 ret = FAILED;
4775 goto out_unlock_buf;
4776 }
4777 spin_lock(&pring_s4->ring_lock);
4778 }
4779 /* the command is in process of being cancelled */
4780 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4781 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4782 "3169 SCSI Layer abort requested I/O has been "
4783 "cancelled by LLD.\n");
4784 ret = FAILED;
4785 goto out_unlock_ring;
4786 }
4787 /*
4788 * If pCmd field of the corresponding lpfc_io_buf structure
4789 * points to a different SCSI command, then the driver has
4790 * already completed this command, but the midlayer did not
4791 * see the completion before the eh fired. Just return SUCCESS.
4792 */
4793 if (lpfc_cmd->pCmd != cmnd) {
4794 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4795 "3170 SCSI Layer abort requested I/O has been "
4796 "completed by LLD.\n");
4797 goto out_unlock_ring;
4798 }
4799
4800 BUG_ON(iocb->context1 != lpfc_cmd);
4801
4802 /* abort issued in recovery is still in progress */
4803 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4804 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4805 "3389 SCSI Layer I/O Abort Request is pending\n");
4806 if (phba->sli_rev == LPFC_SLI_REV4)
4807 spin_unlock(&pring_s4->ring_lock);
4808 spin_unlock(&lpfc_cmd->buf_lock);
4809 spin_unlock_irqrestore(&phba->hbalock, flags);
4810 goto wait_for_cmpl;
4811 }
4812
4813 abtsiocb = __lpfc_sli_get_iocbq(phba);
4814 if (abtsiocb == NULL) {
4815 ret = FAILED;
4816 goto out_unlock_ring;
4817 }
4818
4819 /* Indicate the IO is being aborted by the driver. */
4820 iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4821
4822 /*
4823 	 * The scsi command cannot be in the txq; it is in flight because the
4824 	 * pCmd is still pointing at the SCSI command we have to abort. There
4825 * is no need to search the txcmplq. Just send an abort to the FW.
4826 */
4827
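	/* Build the abort request from the outstanding IOCB: reuse its class
	 * and context, addressing it by XRI on SLI-4 or by IoTag on SLI-3.
	 */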
4828 cmd = &iocb->iocb;
4829 icmd = &abtsiocb->iocb;
4830 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4831 icmd->un.acxri.abortContextTag = cmd->ulpContext;
4832 if (phba->sli_rev == LPFC_SLI_REV4)
4833 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4834 else
4835 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4836
4837 icmd->ulpLe = 1;
4838 icmd->ulpClass = cmd->ulpClass;
4839
4840 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4841 abtsiocb->hba_wqidx = iocb->hba_wqidx;
4842 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4843 if (iocb->iocb_flag & LPFC_IO_FOF)
4844 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4845
4846 if (lpfc_is_link_up(phba))
4847 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4848 else
4849 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4850
4851 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4852 abtsiocb->vport = vport;
4853 lpfc_cmd->waitq = &waitq;
4854 if (phba->sli_rev == LPFC_SLI_REV4) {
4855 /* Note: both hbalock and ring_lock must be set here */
4856 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4857 abtsiocb, 0);
4858 spin_unlock(&pring_s4->ring_lock);
4859 } else {
4860 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4861 abtsiocb, 0);
4862 }
4863
4864 if (ret_val == IOCB_ERROR) {
4865 /* Indicate the IO is not being aborted by the driver. */
4866 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4867 lpfc_cmd->waitq = NULL;
4868 spin_unlock(&lpfc_cmd->buf_lock);
4869 spin_unlock_irqrestore(&phba->hbalock, flags);
4870 lpfc_sli_release_iocbq(phba, abtsiocb);
4871 ret = FAILED;
4872 goto out;
4873 }
4874
4875 /* no longer need the lock after this point */
4876 spin_unlock(&lpfc_cmd->buf_lock);
4877 spin_unlock_irqrestore(&phba->hbalock, flags);
4878
4879 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4880 lpfc_sli_handle_fast_ring_event(phba,
4881 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4882
4883 wait_for_cmpl:
4884 /* Wait for abort to complete */
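	/* The wait is bounded at twice devloss_tmo, presumably so a lost abort
	 * completion cannot stall the SCSI EH thread indefinitely.
	 */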
4885 wait_event_timeout(waitq,
4886 (lpfc_cmd->pCmd != cmnd),
4887 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4888
4889 spin_lock(&lpfc_cmd->buf_lock);
4890
4891 if (lpfc_cmd->pCmd == cmnd) {
4892 ret = FAILED;
4893 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4894 "0748 abort handler timed out waiting "
4895 "for aborting I/O (xri:x%x) to complete: "
4896 "ret %#x, ID %d, LUN %llu\n",
4897 iocb->sli4_xritag, ret,
4898 cmnd->device->id, cmnd->device->lun);
4899 }
4900
4901 lpfc_cmd->waitq = NULL;
4902
4903 spin_unlock(&lpfc_cmd->buf_lock);
4904 goto out;
4905
4906 out_unlock_ring:
4907 if (phba->sli_rev == LPFC_SLI_REV4)
4908 spin_unlock(&pring_s4->ring_lock);
4909 out_unlock_buf:
4910 spin_unlock(&lpfc_cmd->buf_lock);
4911 out_unlock:
4912 spin_unlock_irqrestore(&phba->hbalock, flags);
4913 out:
4914 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4915 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4916 "LUN %llu\n", ret, cmnd->device->id,
4917 cmnd->device->lun);
4918 return ret;
4919 }
4920
4921 static char *
4922 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4923 {
4924 switch (task_mgmt_cmd) {
4925 case FCP_ABORT_TASK_SET:
4926 return "ABORT_TASK_SET";
4927 case FCP_CLEAR_TASK_SET:
4928 return "FCP_CLEAR_TASK_SET";
4929 case FCP_BUS_RESET:
4930 return "FCP_BUS_RESET";
4931 case FCP_LUN_RESET:
4932 return "FCP_LUN_RESET";
4933 case FCP_TARGET_RESET:
4934 return "FCP_TARGET_RESET";
4935 case FCP_CLEAR_ACA:
4936 return "FCP_CLEAR_ACA";
4937 case FCP_TERMINATE_TASK:
4938 return "FCP_TERMINATE_TASK";
4939 default:
4940 return "unknown";
4941 }
4942 }
4943
4944
4945 /**
4946 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4947 * @vport: The virtual port for which this call is being executed.
4948 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4949 *
4950  * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
4951 *
4952 * Return code :
4953 * 0x2003 - Error
4954 * 0x2002 - Success
4955 **/
4956 static int
4957 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4958 {
4959 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4960 uint32_t rsp_info;
4961 uint32_t rsp_len;
4962 uint8_t rsp_info_code;
4963 int ret = FAILED;
4964
4965
4966 if (fcprsp == NULL)
4967 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4968 "0703 fcp_rsp is missing\n");
4969 else {
4970 rsp_info = fcprsp->rspStatus2;
4971 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4972 rsp_info_code = fcprsp->rspInfo3;
4973
4974
4975 lpfc_printf_vlog(vport, KERN_INFO,
4976 LOG_FCP,
4977 "0706 fcp_rsp valid 0x%x,"
4978 " rsp len=%d code 0x%x\n",
4979 rsp_info,
4980 rsp_len, rsp_info_code);
4981
4982 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
4983 * field specifies the number of valid bytes of FCP_RSP_INFO.
4984 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
4985 */
4986 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4987 ((rsp_len == 8) || (rsp_len == 4))) {
4988 switch (rsp_info_code) {
4989 case RSP_NO_FAILURE:
4990 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4991 "0715 Task Mgmt No Failure\n");
4992 ret = SUCCESS;
4993 break;
4994 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4995 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4996 "0716 Task Mgmt Target "
4997 "reject\n");
4998 break;
4999 case RSP_TM_NOT_COMPLETED: /* TM failed */
5000 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5001 "0717 Task Mgmt Target "
5002 "failed TM\n");
5003 break;
5004 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5005 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5006 "0718 Task Mgmt to invalid "
5007 "LUN\n");
5008 break;
5009 }
5010 }
5011 }
5012 return ret;
5013 }
5014
5015
5016 /**
5017 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5018 * @vport: The virtual port for which this call is being executed.
5019 * @rdata: Pointer to remote port local data
5020 * @tgt_id: Target ID of remote device.
5021 * @lun_id: Lun number for the TMF
5022 * @task_mgmt_cmd: type of TMF to send
5023 *
5024 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5025 * a remote port.
5026 *
5027 * Return Code:
5028 * 0x2003 - Error
5029 * 0x2002 - Success.
5030 **/
5031 static int
5032 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5033 unsigned int tgt_id, uint64_t lun_id,
5034 uint8_t task_mgmt_cmd)
5035 {
5036 struct lpfc_hba *phba = vport->phba;
5037 struct lpfc_io_buf *lpfc_cmd;
5038 struct lpfc_iocbq *iocbq;
5039 struct lpfc_iocbq *iocbqrsp;
5040 struct lpfc_rport_data *rdata;
5041 struct lpfc_nodelist *pnode;
5042 int ret;
5043 int status;
5044
5045 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5046 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5047 return FAILED;
5048 pnode = rdata->pnode;
5049
5050 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5051 if (lpfc_cmd == NULL)
5052 return FAILED;
5053 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5054 lpfc_cmd->rdata = rdata;
5055 lpfc_cmd->pCmd = cmnd;
5056 lpfc_cmd->ndlp = pnode;
5057
5058 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5059 task_mgmt_cmd);
5060 if (!status) {
5061 lpfc_release_scsi_buf(phba, lpfc_cmd);
5062 return FAILED;
5063 }
5064
5065 iocbq = &lpfc_cmd->cur_iocbq;
5066 iocbqrsp = lpfc_sli_get_iocbq(phba);
5067 if (iocbqrsp == NULL) {
5068 lpfc_release_scsi_buf(phba, lpfc_cmd);
5069 return FAILED;
5070 }
5071 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5072
5073 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5074 "0702 Issue %s to TGT %d LUN %llu "
5075 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5076 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5077 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5078 iocbq->iocb_flag);
5079
5080 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5081 iocbq, iocbqrsp, lpfc_cmd->timeout);
5082 if ((status != IOCB_SUCCESS) ||
5083 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5084 if (status != IOCB_SUCCESS ||
5085 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5086 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5087 "0727 TMF %s to TGT %d LUN %llu "
5088 "failed (%d, %d) iocb_flag x%x\n",
5089 lpfc_taskmgmt_name(task_mgmt_cmd),
5090 tgt_id, lun_id,
5091 iocbqrsp->iocb.ulpStatus,
5092 iocbqrsp->iocb.un.ulpWord[4],
5093 iocbq->iocb_flag);
5094 		/* if status == IOCB_SUCCESS here, the failure must be in ulpStatus */
5095 if (status == IOCB_SUCCESS) {
5096 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5097 /* Something in the FCP_RSP was invalid.
5098 * Check conditions */
5099 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5100 else
5101 ret = FAILED;
5102 } else if (status == IOCB_TIMEDOUT) {
5103 ret = TIMEOUT_ERROR;
5104 } else {
5105 ret = FAILED;
5106 }
5107 } else
5108 ret = SUCCESS;
5109
5110 lpfc_sli_release_iocbq(phba, iocbqrsp);
5111
5112 if (ret != TIMEOUT_ERROR)
5113 lpfc_release_scsi_buf(phba, lpfc_cmd);
5114
5115 return ret;
5116 }
5117
5118 /**
5119  * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
5120 * @vport: The virtual port to check on
5121 * @cmnd: Pointer to scsi_cmnd data structure.
5122 *
5123 * This routine delays until the scsi target (aka rport) for the
5124 * command exists (is present and logged in) or we declare it non-existent.
5125 *
5126 * Return code :
5127 * 0x2003 - Error
5128 * 0x2002 - Success
5129 **/
5130 static int
5131 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5132 {
5133 struct lpfc_rport_data *rdata;
5134 struct lpfc_nodelist *pnode;
5135 unsigned long later;
5136
5137 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5138 if (!rdata) {
5139 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5140 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5141 return FAILED;
5142 }
5143 pnode = rdata->pnode;
5144 /*
5145 * If target is not in a MAPPED state, delay until
5146 * target is rediscovered or devloss timeout expires.
5147 */
5148 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5149 while (time_after(later, jiffies)) {
5150 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5151 return FAILED;
5152 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5153 return SUCCESS;
5154 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5155 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5156 if (!rdata)
5157 return FAILED;
5158 pnode = rdata->pnode;
5159 }
5160 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5161 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5162 return FAILED;
5163 return SUCCESS;
5164 }
5165
5166 /**
5167  * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset
5168 * @vport: The virtual port (scsi_host) for the flush context
5169  * @tgt_id: If aborting by Target context - specifies the target id
5170 * @lun_id: If aborting by Lun context - specifies the lun id
5171 * @context: specifies the context level to flush at.
5172 *
5173 * After a reset condition via TMF, we need to flush orphaned i/o
5174 * contexts from the adapter. This routine aborts any contexts
5175 * outstanding, then waits for their completions. The wait is
5176 * bounded by devloss_tmo though.
5177 *
5178 * Return code :
5179 * 0x2003 - Error
5180 * 0x2002 - Success
5181 **/
5182 static int
5183 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5184 uint64_t lun_id, lpfc_ctx_cmd context)
5185 {
5186 struct lpfc_hba *phba = vport->phba;
5187 unsigned long later;
5188 int cnt;
5189
5190 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5191 if (cnt)
5192 lpfc_sli_abort_taskmgmt(vport,
5193 &phba->sli.sli3_ring[LPFC_FCP_RING],
5194 tgt_id, lun_id, context);
5195 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
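	/* Poll every 20 ms for the aborted I/Os to drain, bounded by twice
	 * the devloss timeout computed above.
	 */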
5196 while (time_after(later, jiffies) && cnt) {
5197 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5198 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5199 }
5200 if (cnt) {
5201 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5202 "0724 I/O flush failure for context %s : cnt x%x\n",
5203 ((context == LPFC_CTX_LUN) ? "LUN" :
5204 ((context == LPFC_CTX_TGT) ? "TGT" :
5205 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5206 cnt);
5207 return FAILED;
5208 }
5209 return SUCCESS;
5210 }
5211
5212 /**
5213 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5214 * @cmnd: Pointer to scsi_cmnd data structure.
5215 *
5216 * This routine does a device reset by sending a LUN_RESET task management
5217 * command.
5218 *
5219 * Return code :
5220 * 0x2003 - Error
5221 * 0x2002 - Success
5222 **/
5223 static int
5224 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5225 {
5226 struct Scsi_Host *shost = cmnd->device->host;
5227 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5228 struct lpfc_rport_data *rdata;
5229 struct lpfc_nodelist *pnode;
5230 unsigned tgt_id = cmnd->device->id;
5231 uint64_t lun_id = cmnd->device->lun;
5232 struct lpfc_scsi_event_header scsi_event;
5233 int status;
5234
5235 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5236 if (!rdata || !rdata->pnode) {
5237 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5238 "0798 Device Reset rdata failure: rdata x%px\n",
5239 rdata);
5240 return FAILED;
5241 }
5242 pnode = rdata->pnode;
5243 status = fc_block_scsi_eh(cmnd);
5244 if (status != 0 && status != SUCCESS)
5245 return status;
5246
5247 status = lpfc_chk_tgt_mapped(vport, cmnd);
5248 if (status == FAILED) {
5249 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5250 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5251 return FAILED;
5252 }
5253
5254 scsi_event.event_type = FC_REG_SCSI_EVENT;
5255 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5256 scsi_event.lun = lun_id;
5257 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5258 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5259
5260 fc_host_post_vendor_event(shost, fc_get_event_number(),
5261 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5262
5263 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5264 FCP_LUN_RESET);
5265
5266 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5267 "0713 SCSI layer issued Device Reset (%d, %llu) "
5268 "return x%x\n", tgt_id, lun_id, status);
5269
5270 /*
5271 * We have to clean up i/o as it may be orphaned by the TMF,
5272 * or, if the TMF failed, it may be in an indeterminate state.
5273 * So, continue on.
5274 * We will report success if all the i/o aborts successfully.
5275 */
5276 if (status == SUCCESS)
5277 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5278 LPFC_CTX_LUN);
5279
5280 return status;
5281 }
5282
5283 /**
5284 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5285 * @cmnd: Pointer to scsi_cmnd data structure.
5286 *
5287 * This routine does a target reset by sending a TARGET_RESET task management
5288 * command.
5289 *
5290 * Return code :
5291 * 0x2003 - Error
5292 * 0x2002 - Success
5293 **/
5294 static int
5295 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5296 {
5297 struct Scsi_Host *shost = cmnd->device->host;
5298 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5299 struct lpfc_rport_data *rdata;
5300 struct lpfc_nodelist *pnode;
5301 unsigned tgt_id = cmnd->device->id;
5302 uint64_t lun_id = cmnd->device->lun;
5303 struct lpfc_scsi_event_header scsi_event;
5304 int status;
5305
5306 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5307 if (!rdata || !rdata->pnode) {
5308 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5309 "0799 Target Reset rdata failure: rdata x%px\n",
5310 rdata);
5311 return FAILED;
5312 }
5313 pnode = rdata->pnode;
5314 status = fc_block_scsi_eh(cmnd);
5315 if (status != 0 && status != SUCCESS)
5316 return status;
5317
5318 status = lpfc_chk_tgt_mapped(vport, cmnd);
5319 if (status == FAILED) {
5320 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5321 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5322 if (pnode) {
5323 spin_lock_irq(shost->host_lock);
5324 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5325 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5326 spin_unlock_irq(shost->host_lock);
5327 }
5328 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5329 LPFC_CTX_TGT);
5330 return FAST_IO_FAIL;
5331 }
5332
5333 scsi_event.event_type = FC_REG_SCSI_EVENT;
5334 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5335 scsi_event.lun = 0;
5336 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5337 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5338
5339 fc_host_post_vendor_event(shost, fc_get_event_number(),
5340 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5341
5342 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5343 FCP_TARGET_RESET);
5344
5345 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5346 "0723 SCSI layer issued Target Reset (%d, %llu) "
5347 "return x%x\n", tgt_id, lun_id, status);
5348
5349 /*
5350 * We have to clean up i/o as it may be orphaned by the TMF,
5351 * or, if the TMF failed, it may be in an indeterminate state.
5352 * So, continue on.
5353 * We will report success if all the i/o aborts successfully.
5354 */
5355 if (status == SUCCESS)
5356 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5357 LPFC_CTX_TGT);
5358 return status;
5359 }
5360
5361 /**
5362 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5363 * @cmnd: Pointer to scsi_cmnd data structure.
5364 *
5365 * This routine issues a target reset to every target on @cmnd->device->host,
5366 * emulating parallel SCSI bus reset semantics.
5367 *
5368 * Return code :
5369 * 0x2003 - Error
5370 * 0x2002 - Success
5371 **/
5372 static int
5373 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5374 {
5375 struct Scsi_Host *shost = cmnd->device->host;
5376 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5377 struct lpfc_nodelist *ndlp = NULL;
5378 struct lpfc_scsi_event_header scsi_event;
5379 int match;
5380 int ret = SUCCESS, status, i;
5381
5382 scsi_event.event_type = FC_REG_SCSI_EVENT;
5383 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5384 scsi_event.lun = 0;
5385 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5386 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5387
5388 fc_host_post_vendor_event(shost, fc_get_event_number(),
5389 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5390
5391 status = fc_block_scsi_eh(cmnd);
5392 if (status != 0 && status != SUCCESS)
5393 return status;
5394
5395 /*
5396 * Since the driver manages a single bus device, reset all
5397 * targets known to the driver. Should any target reset
5398 * fail, this routine returns failure to the midlayer.
5399 */
5400 for (i = 0; i < LPFC_MAX_TARGET; i++) {
5401 /* Search for mapped node by target ID */
5402 match = 0;
5403 spin_lock_irq(shost->host_lock);
5404 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5405 if (!NLP_CHK_NODE_ACT(ndlp))
5406 continue;
5407 if (vport->phba->cfg_fcp2_no_tgt_reset &&
5408 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5409 continue;
5410 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5411 ndlp->nlp_sid == i &&
5412 ndlp->rport &&
5413 ndlp->nlp_type & NLP_FCP_TARGET) {
5414 match = 1;
5415 break;
5416 }
5417 }
5418 spin_unlock_irq(shost->host_lock);
5419 if (!match)
5420 continue;
5421
5422 status = lpfc_send_taskmgmt(vport, cmnd,
5423 i, 0, FCP_TARGET_RESET);
5424
5425 if (status != SUCCESS) {
5426 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5427 "0700 Bus Reset on target %d failed\n",
5428 i);
5429 ret = FAILED;
5430 }
5431 }
5432 /*
5433 * We have to clean up i/o as it may be orphaned by the TMFs
5434 * above, or, if any of the TMFs failed, it may be in an
5435 * indeterminate state.
5436 * We will report success if all the i/o aborts successfully.
5437 */
5438
5439 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5440 if (status != SUCCESS)
5441 ret = FAILED;
5442
5443 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5444 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5445 return ret;
5446 }
5447
5448 /**
5449 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5450 * @cmnd: Pointer to scsi_cmnd data structure.
5451 *
5452 * This routine does a host reset to the adapter port. It brings the HBA
5453 * offline, performs a board restart, and then brings the board back online.
5454 * The lpfc_offline calls lpfc_sli_hba_down, which aborts and locally
5455 * rejects all outstanding SCSI commands to the host, and the errors are
5456 * returned back to the SCSI mid-layer. As this is the SCSI mid-layer's
5457 * last resort of error handling, it only returns an error if resetting
5458 * the adapter is unsuccessful; in all other cases it returns success.
5459 *
5460 * Return code :
5461 * 0x2003 - Error
5462 * 0x2002 - Success
5463 **/
5464 static int
5465 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5466 {
5467 struct Scsi_Host *shost = cmnd->device->host;
5468 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5469 struct lpfc_hba *phba = vport->phba;
5470 int rc, ret = SUCCESS;
5471
5472 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5473 "3172 SCSI layer issued Host Reset Data:\n");
5474
5475 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5476 lpfc_offline(phba);
5477 rc = lpfc_sli_brdrestart(phba);
5478 if (rc)
5479 goto error;
5480
5481 rc = lpfc_online(phba);
5482 if (rc)
5483 goto error;
5484
5485 lpfc_unblock_mgmt_io(phba);
5486
5487 return ret;
5488 error:
5489 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5490 "3323 Failed host reset\n");
5491 lpfc_unblock_mgmt_io(phba);
5492 return FAILED;
5493 }
5494
5495 /**
5496 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5497 * @sdev: Pointer to scsi_device.
5498 *
5499 * This routine adds cmds_per_lun count + 2 scsi_bufs to this host's
5500 * globally available list of scsi buffers. It also makes sure that no more
5501 * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
5502 * This list of scsi buffers exists for the lifetime of the driver.
5503 *
5504 * Return codes:
5505 * non-0 - Error
5506 * 0 - Success
5507 **/
5508 static int
5509 lpfc_slave_alloc(struct scsi_device *sdev)
5510 {
5511 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5512 struct lpfc_hba *phba = vport->phba;
5513 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5514 uint32_t total = 0;
5515 uint32_t num_to_alloc = 0;
5516 int num_allocated = 0;
5517 uint32_t sdev_cnt;
5518 struct lpfc_device_data *device_data;
5519 unsigned long flags;
5520 struct lpfc_name target_wwpn;
5521
5522 if (!rport || fc_remote_port_chkready(rport))
5523 return -ENXIO;
5524
5525 if (phba->cfg_fof) {
5526
5527 /*
5528 * Check to see if the device data structure for the lun
5529 * exists. If not, create one.
5530 */
5531
5532 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5533 spin_lock_irqsave(&phba->devicelock, flags);
5534 device_data = __lpfc_get_device_data(phba,
5535 &phba->luns,
5536 &vport->fc_portname,
5537 &target_wwpn,
5538 sdev->lun);
5539 if (!device_data) {
5540 spin_unlock_irqrestore(&phba->devicelock, flags);
5541 device_data = lpfc_create_device_data(phba,
5542 &vport->fc_portname,
5543 &target_wwpn,
5544 sdev->lun,
5545 phba->cfg_XLanePriority,
5546 true);
5547 if (!device_data)
5548 return -ENOMEM;
5549 spin_lock_irqsave(&phba->devicelock, flags);
5550 list_add_tail(&device_data->listentry, &phba->luns);
5551 }
5552 device_data->rport_data = rport->dd_data;
5553 device_data->available = true;
5554 spin_unlock_irqrestore(&phba->devicelock, flags);
5555 sdev->hostdata = device_data;
5556 } else {
5557 sdev->hostdata = rport->dd_data;
5558 }
5559 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5560
5561 /* For SLI4, all IO buffers are pre-allocated */
5562 if (phba->sli_rev == LPFC_SLI_REV4)
5563 return 0;
5564
5565 /* This code path is now ONLY for SLI3 adapters */
5566
5567 /*
5568 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5569 * available list of scsi buffers. Don't allocate more than the
5570 * HBA limit conveyed to the midlayer via the host structure. The
5571 * formula accounts for the lun_queue_depth + error handlers + 1
5572 * extra. This list of scsi bufs exists for the lifetime of the driver.
5573 */
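/*
 * Worked example with hypothetical numbers (not from any configuration):
 * cfg_lun_queue_depth = 30 makes each new device ask for 32 bufs; with
 * cfg_hba_queue_depth = 512 and, say, 20 exchanges reserved by
 * LPFC_DISC_IOCB_BUFF_COUNT for discovery, allocation stops growing once
 * total reaches 492, and a request that would cross that limit is trimmed
 * to the remaining headroom (cfg_hba_queue_depth - total).
 */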
5574 total = phba->total_scsi_bufs;
5575 num_to_alloc = vport->cfg_lun_queue_depth + 2;
5576
5577 /* If allocated buffers are enough do nothing */
5578 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5579 return 0;
5580
5581 /* Allow some exchanges to be available always to complete discovery */
5582 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5583 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5584 "0704 At limitation of %d preallocated "
5585 "command buffers\n", total);
5586 return 0;
5587 /* Allow some exchanges to be available always to complete discovery */
5588 } else if (total + num_to_alloc >
5589 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5590 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5591 "0705 Allocation request of %d "
5592 "command buffers will exceed max of %d. "
5593 "Reducing allocation request to %d.\n",
5594 num_to_alloc, phba->cfg_hba_queue_depth,
5595 (phba->cfg_hba_queue_depth - total));
5596 num_to_alloc = phba->cfg_hba_queue_depth - total;
5597 }
5598 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5599 if (num_to_alloc != num_allocated) {
5600 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5601 "0708 Allocation request of %d "
5602 "command buffers did not succeed. "
5603 "Allocated %d buffers.\n",
5604 num_to_alloc, num_allocated);
5605 }
5606 if (num_allocated > 0)
5607 phba->total_scsi_bufs += num_allocated;
5608 return 0;
5609 }
5610
5611 /**
5612 * lpfc_slave_configure - scsi_host_template slave_configure entry point
5613 * @sdev: Pointer to scsi_device.
5614 *
5615 * This routine configures the following items:
5616 * - Tag command queuing support for @sdev if supported.
5617 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5618 *
5619 * Return codes:
5620 * 0 - Success
5621 **/
5622 static int
5623 lpfc_slave_configure(struct scsi_device *sdev)
5624 {
5625 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5626 struct lpfc_hba *phba = vport->phba;
5627
5628 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5629
5630 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5631 lpfc_sli_handle_fast_ring_event(phba,
5632 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5633 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5634 lpfc_poll_rearm_timer(phba);
5635 }
5636
5637 return 0;
5638 }
5639
5640 /**
5641 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5642 * @sdev: Pointer to scsi_device.
5643 *
5644 * This routine sets the @sdev hostdata field to null.
5645 **/
5646 static void
5647 lpfc_slave_destroy(struct scsi_device *sdev)
5648 {
5649 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5650 struct lpfc_hba *phba = vport->phba;
5651 unsigned long flags;
5652 struct lpfc_device_data *device_data = sdev->hostdata;
5653
5654 atomic_dec(&phba->sdev_cnt);
5655 if ((phba->cfg_fof) && (device_data)) {
5656 spin_lock_irqsave(&phba->devicelock, flags);
5657 device_data->available = false;
5658 if (!device_data->oas_enabled)
5659 lpfc_delete_device_data(phba, device_data);
5660 spin_unlock_irqrestore(&phba->devicelock, flags);
5661 }
5662 sdev->hostdata = NULL;
5663 return;
5664 }
5665
5666 /**
5667 * lpfc_create_device_data - creates and initializes device data structure for OAS
5668 * @phba: Pointer to host bus adapter structure.
5669 * @vport_wwpn: Pointer to vport's wwpn information
5670 * @target_wwpn: Pointer to target's wwpn information
5671 * @lun: Lun on target
5672 * @atomic_create: Flag to indicate if memory should be allocated using the
5673 * GFP_ATOMIC flag or not.
5674 *
5675 * This routine creates a device data structure which will contain identifying
5676 * information for the device (host wwpn, target wwpn, lun), state of OAS,
5677 * whether or not the corresponding lun is available to the system,
5678 * and pointer to the rport data.
5679 *
5680 * Return codes:
5681 * NULL - Error
5682 * Pointer to lpfc_device_data - Success
5683 **/
5684 struct lpfc_device_data*
5685 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5686 struct lpfc_name *target_wwpn, uint64_t lun,
5687 uint32_t pri, bool atomic_create)
5688 {
5689
5690 struct lpfc_device_data *lun_info;
5691 int memory_flags;
5692
5693 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5694 !(phba->cfg_fof))
5695 return NULL;
5696
5697 /* Attempt to create the device data to contain lun info */
5698
5699 if (atomic_create)
5700 memory_flags = GFP_ATOMIC;
5701 else
5702 memory_flags = GFP_KERNEL;
5703 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5704 if (!lun_info)
5705 return NULL;
5706 INIT_LIST_HEAD(&lun_info->listentry);
5707 lun_info->rport_data = NULL;
5708 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5709 sizeof(struct lpfc_name));
5710 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5711 sizeof(struct lpfc_name));
5712 lun_info->device_id.lun = lun;
5713 lun_info->oas_enabled = false;
5714 lun_info->priority = pri;
5715 lun_info->available = false;
5716 return lun_info;
5717 }
5718
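/*
 * Usage sketch (illustrative, mirroring lpfc_slave_alloc above): the routine
 * only allocates and fills in the structure; callers that want the entry
 * tracked must add it to phba->luns themselves while holding
 * phba->devicelock:
 *
 *	lun_info = lpfc_create_device_data(phba, &vport->fc_portname,
 *					   &target_wwpn, sdev->lun,
 *					   phba->cfg_XLanePriority, true);
 *	if (lun_info) {
 *		spin_lock_irqsave(&phba->devicelock, flags);
 *		list_add_tail(&lun_info->listentry, &phba->luns);
 *		spin_unlock_irqrestore(&phba->devicelock, flags);
 *	}
 */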
5719 /**
5720 * lpfc_delete_device_data - frees a device data structure for OAS
5721 * @phba: Pointer to host bus adapter structure.
5722 * @lun_info: Pointer to device data structure to free.
5723 *
5724 * This routine frees the previously allocated device data structure passed.
5725 *
5726 **/
5727 void
5728 lpfc_delete_device_data(struct lpfc_hba *phba,
5729 struct lpfc_device_data *lun_info)
5730 {
5731
5732 if (unlikely(!phba) || !lun_info ||
5733 !(phba->cfg_fof))
5734 return;
5735
5736 if (!list_empty(&lun_info->listentry))
5737 list_del(&lun_info->listentry);
5738 mempool_free(lun_info, phba->device_data_mem_pool);
5739 return;
5740 }
5741
5742 /**
5743 * __lpfc_get_device_data - returns the device data for the specified lun
5744 * @phba: Pointer to host bus adapter structure.
5745 * @list: Pointer to the list to search.
5746 * @vport_wwpn: Pointer to vport's wwpn information
5747 * @target_wwpn: Pointer to target's wwpn information
5748 * @lun: Lun on target
5749 *
5750 * This routine searches the list passed for the specified lun's device data.
5751 * This function does not hold locks, it is the responsibility of the caller
5752 * to ensure the proper lock is held before calling the function.
5753 *
5754 * Return codes:
5755 * NULL - Error
5756 * Pointer to lpfc_device_data - Success
5757 **/
5758 struct lpfc_device_data*
5759 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5760 struct lpfc_name *vport_wwpn,
5761 struct lpfc_name *target_wwpn, uint64_t lun)
5762 {
5763
5764 struct lpfc_device_data *lun_info;
5765
5766 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5767 !phba->cfg_fof)
5768 return NULL;
5769
5770 /* Check to see if the lun is already enabled for OAS. */
5771
5772 list_for_each_entry(lun_info, list, listentry) {
5773 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5774 sizeof(struct lpfc_name)) == 0) &&
5775 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5776 sizeof(struct lpfc_name)) == 0) &&
5777 (lun_info->device_id.lun == lun))
5778 return lun_info;
5779 }
5780
5781 return NULL;
5782 }
5783
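/*
 * Lookup sketch (illustrative only): the helper takes no locks, so callers
 * such as lpfc_slave_alloc and lpfc_enable_oas_lun wrap it in
 * phba->devicelock:
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
 *					  target_wwpn, lun);
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */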
5784 /**
5785 * lpfc_find_next_oas_lun - searches for the next oas lun
5786 * @phba: Pointer to host bus adapter structure.
5787 * @vport_wwpn: Pointer to vport's wwpn information
5788 * @target_wwpn: Pointer to target's wwpn information
5789 * @starting_lun: Pointer to the lun to start searching for
5790 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5791 * @found_target_wwpn: Pointer to the found lun's target wwpn information
5792 * @found_lun: Pointer to the found lun.
5793 * @found_lun_status: Pointer to status of the found lun.
5794 *
5795 * This routine searches the luns list for the specified lun
5796 * or the first lun for the vport/target. If the vport wwpn contains
5797 * a zero value then a specific vport is not specified. In this case
5798 * any vport which contains the lun will be considered a match. If the
5799 * target wwpn contains a zero value then a specific target is not specified.
5800 * In this case any target which contains the lun will be considered a
5801 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
5802 * are returned. The function will also return the next lun if available.
5803 * If the next lun is not found, starting_lun parameter will be set to
5804 * NO_MORE_OAS_LUN.
5805 *
5806 * Return codes:
5807 * true - A matching lun was found
5808 * false - No matching lun was found
5809 **/
5810 bool
5811 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5812 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5813 struct lpfc_name *found_vport_wwpn,
5814 struct lpfc_name *found_target_wwpn,
5815 uint64_t *found_lun,
5816 uint32_t *found_lun_status,
5817 uint32_t *found_lun_pri)
5818 {
5819
5820 unsigned long flags;
5821 struct lpfc_device_data *lun_info;
5822 struct lpfc_device_id *device_id;
5823 uint64_t lun;
5824 bool found = false;
5825
5826 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5827 !starting_lun || !found_vport_wwpn ||
5828 !found_target_wwpn || !found_lun || !found_lun_status ||
5829 (*starting_lun == NO_MORE_OAS_LUN) ||
5830 !phba->cfg_fof)
5831 return false;
5832
5833 lun = *starting_lun;
5834 *found_lun = NO_MORE_OAS_LUN;
5835 *starting_lun = NO_MORE_OAS_LUN;
5836
5837 /* Search for the lun or the lun closest in value */
5838
5839 spin_lock_irqsave(&phba->devicelock, flags);
5840 list_for_each_entry(lun_info, &phba->luns, listentry) {
5841 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5842 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5843 sizeof(struct lpfc_name)) == 0)) &&
5844 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5845 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5846 sizeof(struct lpfc_name)) == 0)) &&
5847 (lun_info->oas_enabled)) {
5848 device_id = &lun_info->device_id;
5849 if ((!found) &&
5850 ((lun == FIND_FIRST_OAS_LUN) ||
5851 (device_id->lun == lun))) {
5852 *found_lun = device_id->lun;
5853 memcpy(found_vport_wwpn,
5854 &device_id->vport_wwpn,
5855 sizeof(struct lpfc_name));
5856 memcpy(found_target_wwpn,
5857 &device_id->target_wwpn,
5858 sizeof(struct lpfc_name));
5859 if (lun_info->available)
5860 *found_lun_status =
5861 OAS_LUN_STATUS_EXISTS;
5862 else
5863 *found_lun_status = 0;
5864 *found_lun_pri = lun_info->priority;
5865 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5866 memset(vport_wwpn, 0x0,
5867 sizeof(struct lpfc_name));
5868 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5869 memset(target_wwpn, 0x0,
5870 sizeof(struct lpfc_name));
5871 found = true;
5872 } else if (found) {
5873 *starting_lun = device_id->lun;
5874 memcpy(vport_wwpn, &device_id->vport_wwpn,
5875 sizeof(struct lpfc_name));
5876 memcpy(target_wwpn, &device_id->target_wwpn,
5877 sizeof(struct lpfc_name));
5878 break;
5879 }
5880 }
5881 }
5882 spin_unlock_irqrestore(&phba->devicelock, flags);
5883 return found;
5884 }
5885
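/*
 * Iteration sketch (hypothetical caller; the locals are assumed to be
 * declared elsewhere): starting from FIND_FIRST_OAS_LUN, each successful
 * call reports one OAS-enabled lun and primes *starting_lun with the next
 * one, until the function returns false or *starting_lun comes back as
 * NO_MORE_OAS_LUN. Zeroed vport/target wwpns act as wildcards.
 *
 *	uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *	memset(&vport_wwpn, 0, sizeof(vport_wwpn));
 *	memset(&target_wwpn, 0, sizeof(target_wwpn));
 *	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
 *				      &found_vport_wwpn, &found_target_wwpn,
 *				      &found_lun, &found_lun_status,
 *				      &found_lun_pri))
 *		pr_info("OAS lun %llu pri %u\n", found_lun, found_lun_pri);
 */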
5886 /**
5887 * lpfc_enable_oas_lun - enables a lun for OAS operations
5888 * @phba: Pointer to host bus adapter structure.
5889 * @vport_wwpn: Pointer to vport's wwpn information
5890 * @target_wwpn: Pointer to target's wwpn information
5891 * @lun: Lun
5892 *
5893 * This routine enables a lun for OAS operations. The routine does so by
5894 * doing the following:
5895 *
5896 * 1) Checks to see if the device data for the lun has been created.
5897 * 2) If found, sets the OAS enabled flag if not set and returns.
5898 * 3) Otherwise, creates a device data structure.
5899 * 4) If successfully created, indicates the device data is for an OAS lun,
5900 * indicates the lun is not available, and adds it to the list of luns.
5901 *
5902 * Return codes:
5903 * false - Error
5904 * true - Success
5905 **/
5906 bool
5907 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5908 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5909 {
5910
5911 struct lpfc_device_data *lun_info;
5912 unsigned long flags;
5913
5914 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5915 !phba->cfg_fof)
5916 return false;
5917
5918 spin_lock_irqsave(&phba->devicelock, flags);
5919
5920 /* Check to see if the device data for the lun has been created */
5921 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5922 target_wwpn, lun);
5923 if (lun_info) {
5924 if (!lun_info->oas_enabled)
5925 lun_info->oas_enabled = true;
5926 lun_info->priority = pri;
5927 spin_unlock_irqrestore(&phba->devicelock, flags);
5928 return true;
5929 }
5930
5931 /* Create an lun info structure and add to list of luns */
5932 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5933 pri, true);
5934 if (lun_info) {
5935 lun_info->oas_enabled = true;
5936 lun_info->priority = pri;
5937 lun_info->available = false;
5938 list_add_tail(&lun_info->listentry, &phba->luns);
5939 spin_unlock_irqrestore(&phba->devicelock, flags);
5940 return true;
5941 }
5942 spin_unlock_irqrestore(&phba->devicelock, flags);
5943 return false;
5944 }
5945
5946 /**
5947 * lpfc_disable_oas_lun - disables a lun for OAS operations
5948 * @phba: Pointer to host bus adapter structure.
5949 * @vport_wwpn: Pointer to vport's wwpn information
5950 * @target_wwpn: Pointer to target's wwpn information
5951 * @lun: Lun
5952 *
5953 * This routine disables a lun for OAS operations. The routine does so by
5954 * doing the following:
5955 *
5956 * 1) Checks to see if the device data for the lun is created.
5957 * 2) If present, clears the flag indicating this lun is for OAS.
5958 * 3) If the lun is not available to the system, the device data is
5959 * freed.
5960 *
5961 * Return codes:
5962 * false - Error
5963 * true - Success
5964 **/
5965 bool
5966 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5967 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5968 {
5969
5970 struct lpfc_device_data *lun_info;
5971 unsigned long flags;
5972
5973 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5974 !phba->cfg_fof)
5975 return false;
5976
5977 spin_lock_irqsave(&phba->devicelock, flags);
5978
5979 /* Check to see if the lun is available. */
5980 lun_info = __lpfc_get_device_data(phba,
5981 &phba->luns, vport_wwpn,
5982 target_wwpn, lun);
5983 if (lun_info) {
5984 lun_info->oas_enabled = false;
5985 lun_info->priority = pri;
5986 if (!lun_info->available)
5987 lpfc_delete_device_data(phba, lun_info);
5988 spin_unlock_irqrestore(&phba->devicelock, flags);
5989 return true;
5990 }
5991
5992 spin_unlock_irqrestore(&phba->devicelock, flags);
5993 return false;
5994 }
5995
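/*
 * Enable/disable sketch (hypothetical values and a hypothetical "enabled"
 * local; in the driver these calls are typically reached from the OAS
 * management interfaces rather than invoked directly like this):
 *
 *	enabled = lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *				      lun, pri);
 *	...
 *	if (enabled)
 *		lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn,
 *				     lun, pri);
 */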
5996 static int
5997 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5998 {
5999 return SCSI_MLQUEUE_HOST_BUSY;
6000 }
6001
6002 static int
6003 lpfc_no_handler(struct scsi_cmnd *cmnd)
6004 {
6005 return FAILED;
6006 }
6007
6008 static int
6009 lpfc_no_slave(struct scsi_device *sdev)
6010 {
6011 return -ENODEV;
6012 }
6013
6014 struct scsi_host_template lpfc_template_nvme = {
6015 .module = THIS_MODULE,
6016 .name = LPFC_DRIVER_NAME,
6017 .proc_name = LPFC_DRIVER_NAME,
6018 .info = lpfc_info,
6019 .queuecommand = lpfc_no_command,
6020 .eh_abort_handler = lpfc_no_handler,
6021 .eh_device_reset_handler = lpfc_no_handler,
6022 .eh_target_reset_handler = lpfc_no_handler,
6023 .eh_bus_reset_handler = lpfc_no_handler,
6024 .eh_host_reset_handler = lpfc_no_handler,
6025 .slave_alloc = lpfc_no_slave,
6026 .slave_configure = lpfc_no_slave,
6027 .scan_finished = lpfc_scan_finished,
6028 .this_id = -1,
6029 .sg_tablesize = 1,
6030 .cmd_per_lun = 1,
6031 .shost_attrs = lpfc_hba_attrs,
6032 .max_sectors = 0xFFFF,
6033 .vendor_id = LPFC_NL_VENDOR_ID,
6034 .track_queue_depth = 0,
6035 };
6036
6037 struct scsi_host_template lpfc_template = {
6038 .module = THIS_MODULE,
6039 .name = LPFC_DRIVER_NAME,
6040 .proc_name = LPFC_DRIVER_NAME,
6041 .info = lpfc_info,
6042 .queuecommand = lpfc_queuecommand,
6043 .eh_timed_out = fc_eh_timed_out,
6044 .eh_abort_handler = lpfc_abort_handler,
6045 .eh_device_reset_handler = lpfc_device_reset_handler,
6046 .eh_target_reset_handler = lpfc_target_reset_handler,
6047 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6048 .eh_host_reset_handler = lpfc_host_reset_handler,
6049 .slave_alloc = lpfc_slave_alloc,
6050 .slave_configure = lpfc_slave_configure,
6051 .slave_destroy = lpfc_slave_destroy,
6052 .scan_finished = lpfc_scan_finished,
6053 .this_id = -1,
6054 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6055 .cmd_per_lun = LPFC_CMD_PER_LUN,
6056 .shost_attrs = lpfc_hba_attrs,
6057 .max_sectors = 0xFFFF,
6058 .vendor_id = LPFC_NL_VENDOR_ID,
6059 .change_queue_depth = scsi_change_queue_depth,
6060 .track_queue_depth = 1,
6061 };
6062