1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/interrupt.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/lockdep.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36 #include <scsi/fc/fc_fs.h>
37 #include <linux/aer.h>
38 #include <linux/crash_dump.h>
39 #ifdef CONFIG_X86
40 #include <asm/set_memory.h>
41 #endif
42
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_compat.h"
55 #include "lpfc_debugfs.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_version.h"
58
59 /* There are only four IOCB completion types. */
60 typedef enum _lpfc_iocb_type {
61 LPFC_UNKNOWN_IOCB,
62 LPFC_UNSOL_IOCB,
63 LPFC_SOL_IOCB,
64 LPFC_ABORT_IOCB
65 } lpfc_iocb_type;
66
67
68 /* Provide function prototypes local to this module. */
69 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 uint32_t);
71 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint8_t *, uint32_t *);
73 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *);
75 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *);
77 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
79 static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
81 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82 int);
83 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 struct lpfc_queue *eq,
85 struct lpfc_eqe *eqe);
86 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
87 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
88 static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89 static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90 struct lpfc_queue *cq,
91 struct lpfc_cqe *cqe);
92
93 static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq * iocbq)94 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
95 {
96 return &iocbq->iocb;
97 }
98
99 #if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
100 /**
101 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
102 * @srcp: Source memory pointer.
103 * @destp: Destination memory pointer.
104 * @cnt: Number of words required to be copied.
105 * Must be a multiple of sizeof(uint64_t)
106 *
107 * This function is used for copying data between driver memory
108 * and the SLI WQ. This function also changes the endianness
109 * of each word if native endianness is different from SLI
110 * endianness. This function can be called with or without
111 * lock.
112 **/
113 static void
lpfc_sli4_pcimem_bcopy(void * srcp,void * destp,uint32_t cnt)114 lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
115 {
116 uint64_t *src = srcp;
117 uint64_t *dest = destp;
118 int i;
119
120 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
121 *dest++ = *src++;
122 }
123 #else
124 #define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
125 #endif
126
127 /**
128 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
129 * @q: The Work Queue to operate on.
130 * @wqe: The work Queue Entry to put on the Work queue.
131 *
132 * This routine will copy the contents of @wqe to the next available entry on
133 * the @q. This function will then ring the Work Queue Doorbell to signal the
134 * HBA to start processing the Work Queue Entry. This function returns 0 if
135 * successful. If no entries are available on @q then this function will return
136 * -ENOMEM.
137 * The caller is expected to hold the hbalock when calling this routine.
138 **/
139 static int
lpfc_sli4_wq_put(struct lpfc_queue * q,union lpfc_wqe128 * wqe)140 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
141 {
142 union lpfc_wqe *temp_wqe;
143 struct lpfc_register doorbell;
144 uint32_t host_index;
145 uint32_t idx;
146 uint32_t i = 0;
147 uint8_t *tmp;
148 u32 if_type;
149
150 /* sanity check on queue memory */
151 if (unlikely(!q))
152 return -ENOMEM;
153 temp_wqe = lpfc_sli4_qe(q, q->host_index);
154
155 /* If the host has not yet processed the next entry then we are done */
156 idx = ((q->host_index + 1) % q->entry_count);
157 if (idx == q->hba_index) {
158 q->WQ_overflow++;
159 return -EBUSY;
160 }
161 q->WQ_posted++;
162 /* set consumption flag every once in a while */
163 if (!((q->host_index + 1) % q->notify_interval))
164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
165 else
166 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
167 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
168 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
169 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
170 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
171 /* write to DPP aperture taking advatage of Combined Writes */
172 tmp = (uint8_t *)temp_wqe;
173 #ifdef __raw_writeq
174 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
175 __raw_writeq(*((uint64_t *)(tmp + i)),
176 q->dpp_regaddr + i);
177 #else
178 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
179 __raw_writel(*((uint32_t *)(tmp + i)),
180 q->dpp_regaddr + i);
181 #endif
182 }
183 /* ensure WQE bcopy and DPP flushed before doorbell write */
184 wmb();
185
186 /* Update the host index before invoking device */
187 host_index = q->host_index;
188
189 q->host_index = idx;
190
191 /* Ring Doorbell */
192 doorbell.word0 = 0;
193 if (q->db_format == LPFC_DB_LIST_FORMAT) {
194 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
195 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
196 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
197 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
198 q->dpp_id);
199 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
200 q->queue_id);
201 } else {
202 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
203 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
204
205 /* Leave bits <23:16> clear for if_type 6 dpp */
206 if_type = bf_get(lpfc_sli_intf_if_type,
207 &q->phba->sli4_hba.sli_intf);
208 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
209 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
210 host_index);
211 }
212 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
213 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
214 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
215 } else {
216 return -EINVAL;
217 }
218 writel(doorbell.word0, q->db_regaddr);
219
220 return 0;
221 }
222
223 /**
224 * lpfc_sli4_wq_release - Updates internal hba index for WQ
225 * @q: The Work Queue to operate on.
226 * @index: The index to advance the hba index to.
227 *
228 * This routine will update the HBA index of a queue to reflect consumption of
229 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
230 * an entry the host calls this function to update the queue's internal
231 * pointers.
232 **/
233 static void
lpfc_sli4_wq_release(struct lpfc_queue * q,uint32_t index)234 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
235 {
236 /* sanity check on queue memory */
237 if (unlikely(!q))
238 return;
239
240 q->hba_index = index;
241 }
242
243 /**
244 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
245 * @q: The Mailbox Queue to operate on.
246 * @mqe: The Mailbox Queue Entry to put on the Work queue.
247 *
248 * This routine will copy the contents of @mqe to the next available entry on
249 * the @q. This function will then ring the Work Queue Doorbell to signal the
250 * HBA to start processing the Work Queue Entry. This function returns 0 if
251 * successful. If no entries are available on @q then this function will return
252 * -ENOMEM.
253 * The caller is expected to hold the hbalock when calling this routine.
254 **/
255 static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue * q,struct lpfc_mqe * mqe)256 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
257 {
258 struct lpfc_mqe *temp_mqe;
259 struct lpfc_register doorbell;
260
261 /* sanity check on queue memory */
262 if (unlikely(!q))
263 return -ENOMEM;
264 temp_mqe = lpfc_sli4_qe(q, q->host_index);
265
266 /* If the host has not yet processed the next entry then we are done */
267 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
268 return -ENOMEM;
269 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
270 /* Save off the mailbox pointer for completion */
271 q->phba->mbox = (MAILBOX_t *)temp_mqe;
272
273 /* Update the host index before invoking device */
274 q->host_index = ((q->host_index + 1) % q->entry_count);
275
276 /* Ring Doorbell */
277 doorbell.word0 = 0;
278 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
279 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
280 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
281 return 0;
282 }
283
284 /**
285 * lpfc_sli4_mq_release - Updates internal hba index for MQ
286 * @q: The Mailbox Queue to operate on.
287 *
288 * This routine will update the HBA index of a queue to reflect consumption of
289 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
290 * an entry the host calls this function to update the queue's internal
291 * pointers. This routine returns the number of entries that were consumed by
292 * the HBA.
293 **/
294 static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue * q)295 lpfc_sli4_mq_release(struct lpfc_queue *q)
296 {
297 /* sanity check on queue memory */
298 if (unlikely(!q))
299 return 0;
300
301 /* Clear the mailbox pointer for completion */
302 q->phba->mbox = NULL;
303 q->hba_index = ((q->hba_index + 1) % q->entry_count);
304 return 1;
305 }
306
307 /**
308 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
309 * @q: The Event Queue to get the first valid EQE from
310 *
311 * This routine will get the first valid Event Queue Entry from @q, update
312 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
313 * the Queue (no more work to do), or the Queue is full of EQEs that have been
314 * processed, but not popped back to the HBA then this routine will return NULL.
315 **/
316 static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue * q)317 lpfc_sli4_eq_get(struct lpfc_queue *q)
318 {
319 struct lpfc_eqe *eqe;
320
321 /* sanity check on queue memory */
322 if (unlikely(!q))
323 return NULL;
324 eqe = lpfc_sli4_qe(q, q->host_index);
325
326 /* If the next EQE is not valid then we are done */
327 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
328 return NULL;
329
330 /*
331 * insert barrier for instruction interlock : data from the hardware
332 * must have the valid bit checked before it can be copied and acted
333 * upon. Speculative instructions were allowing a bcopy at the start
334 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
335 * after our return, to copy data before the valid bit check above
336 * was done. As such, some of the copied data was stale. The barrier
337 * ensures the check is before any data is copied.
338 */
339 mb();
340 return eqe;
341 }
342
343 /**
344 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
345 * @q: The Event Queue to disable interrupts
346 *
347 **/
348 void
lpfc_sli4_eq_clr_intr(struct lpfc_queue * q)349 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
350 {
351 struct lpfc_register doorbell;
352
353 doorbell.word0 = 0;
354 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
355 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
356 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
357 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
358 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
359 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
360 }
361
362 /**
363 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
364 * @q: The Event Queue to disable interrupts
365 *
366 **/
367 void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue * q)368 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
369 {
370 struct lpfc_register doorbell;
371
372 doorbell.word0 = 0;
373 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
374 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
375 }
376
377 /**
378 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
379 * @phba: adapter with EQ
380 * @q: The Event Queue that the host has completed processing for.
381 * @count: Number of elements that have been consumed
382 * @arm: Indicates whether the host wants to arms this CQ.
383 *
384 * This routine will notify the HBA, by ringing the doorbell, that count
385 * number of EQEs have been processed. The @arm parameter indicates whether
386 * the queue should be rearmed when ringing the doorbell.
387 **/
388 void
lpfc_sli4_write_eq_db(struct lpfc_hba * phba,struct lpfc_queue * q,uint32_t count,bool arm)389 lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
390 uint32_t count, bool arm)
391 {
392 struct lpfc_register doorbell;
393
394 /* sanity check on queue memory */
395 if (unlikely(!q || (count == 0 && !arm)))
396 return;
397
398 /* ring doorbell for number popped */
399 doorbell.word0 = 0;
400 if (arm) {
401 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
402 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
403 }
404 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
405 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
406 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
407 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
408 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
409 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
410 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
411 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
412 readl(q->phba->sli4_hba.EQDBregaddr);
413 }
414
415 /**
416 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
417 * @phba: adapter with EQ
418 * @q: The Event Queue that the host has completed processing for.
419 * @count: Number of elements that have been consumed
420 * @arm: Indicates whether the host wants to arms this CQ.
421 *
422 * This routine will notify the HBA, by ringing the doorbell, that count
423 * number of EQEs have been processed. The @arm parameter indicates whether
424 * the queue should be rearmed when ringing the doorbell.
425 **/
426 void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba * phba,struct lpfc_queue * q,uint32_t count,bool arm)427 lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
428 uint32_t count, bool arm)
429 {
430 struct lpfc_register doorbell;
431
432 /* sanity check on queue memory */
433 if (unlikely(!q || (count == 0 && !arm)))
434 return;
435
436 /* ring doorbell for number popped */
437 doorbell.word0 = 0;
438 if (arm)
439 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
440 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
441 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
442 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
443 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
444 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
445 readl(q->phba->sli4_hba.EQDBregaddr);
446 }
447
448 static void
__lpfc_sli4_consume_eqe(struct lpfc_hba * phba,struct lpfc_queue * eq,struct lpfc_eqe * eqe)449 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
450 struct lpfc_eqe *eqe)
451 {
452 if (!phba->sli4_hba.pc_sli4_params.eqav)
453 bf_set_le32(lpfc_eqe_valid, eqe, 0);
454
455 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
456
457 /* if the index wrapped around, toggle the valid bit */
458 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
459 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
460 }
461
462 static void
lpfc_sli4_eqcq_flush(struct lpfc_hba * phba,struct lpfc_queue * eq)463 lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
464 {
465 struct lpfc_eqe *eqe = NULL;
466 u32 eq_count = 0, cq_count = 0;
467 struct lpfc_cqe *cqe = NULL;
468 struct lpfc_queue *cq = NULL, *childq = NULL;
469 int cqid = 0;
470
471 /* walk all the EQ entries and drop on the floor */
472 eqe = lpfc_sli4_eq_get(eq);
473 while (eqe) {
474 /* Get the reference to the corresponding CQ */
475 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
476 cq = NULL;
477
478 list_for_each_entry(childq, &eq->child_list, list) {
479 if (childq->queue_id == cqid) {
480 cq = childq;
481 break;
482 }
483 }
484 /* If CQ is valid, iterate through it and drop all the CQEs */
485 if (cq) {
486 cqe = lpfc_sli4_cq_get(cq);
487 while (cqe) {
488 __lpfc_sli4_consume_cqe(phba, cq, cqe);
489 cq_count++;
490 cqe = lpfc_sli4_cq_get(cq);
491 }
492 /* Clear and re-arm the CQ */
493 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
494 LPFC_QUEUE_REARM);
495 cq_count = 0;
496 }
497 __lpfc_sli4_consume_eqe(phba, eq, eqe);
498 eq_count++;
499 eqe = lpfc_sli4_eq_get(eq);
500 }
501
502 /* Clear and re-arm the EQ */
503 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
504 }
505
506 static int
lpfc_sli4_process_eq(struct lpfc_hba * phba,struct lpfc_queue * eq,uint8_t rearm)507 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
508 uint8_t rearm)
509 {
510 struct lpfc_eqe *eqe;
511 int count = 0, consumed = 0;
512
513 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
514 goto rearm_and_exit;
515
516 eqe = lpfc_sli4_eq_get(eq);
517 while (eqe) {
518 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
519 __lpfc_sli4_consume_eqe(phba, eq, eqe);
520
521 consumed++;
522 if (!(++count % eq->max_proc_limit))
523 break;
524
525 if (!(count % eq->notify_interval)) {
526 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
527 LPFC_QUEUE_NOARM);
528 consumed = 0;
529 }
530
531 eqe = lpfc_sli4_eq_get(eq);
532 }
533 eq->EQ_processed += count;
534
535 /* Track the max number of EQEs processed in 1 intr */
536 if (count > eq->EQ_max_eqe)
537 eq->EQ_max_eqe = count;
538
539 xchg(&eq->queue_claimed, 0);
540
541 rearm_and_exit:
542 /* Always clear the EQ. */
543 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
544
545 return count;
546 }
547
548 /**
549 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
550 * @q: The Completion Queue to get the first valid CQE from
551 *
552 * This routine will get the first valid Completion Queue Entry from @q, update
553 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
554 * the Queue (no more work to do), or the Queue is full of CQEs that have been
555 * processed, but not popped back to the HBA then this routine will return NULL.
556 **/
557 static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue * q)558 lpfc_sli4_cq_get(struct lpfc_queue *q)
559 {
560 struct lpfc_cqe *cqe;
561
562 /* sanity check on queue memory */
563 if (unlikely(!q))
564 return NULL;
565 cqe = lpfc_sli4_qe(q, q->host_index);
566
567 /* If the next CQE is not valid then we are done */
568 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
569 return NULL;
570
571 /*
572 * insert barrier for instruction interlock : data from the hardware
573 * must have the valid bit checked before it can be copied and acted
574 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
575 * instructions allowing action on content before valid bit checked,
576 * add barrier here as well. May not be needed as "content" is a
577 * single 32-bit entity here (vs multi word structure for cq's).
578 */
579 mb();
580 return cqe;
581 }
582
583 static void
__lpfc_sli4_consume_cqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_cqe * cqe)584 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
585 struct lpfc_cqe *cqe)
586 {
587 if (!phba->sli4_hba.pc_sli4_params.cqav)
588 bf_set_le32(lpfc_cqe_valid, cqe, 0);
589
590 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
591
592 /* if the index wrapped around, toggle the valid bit */
593 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
594 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
595 }
596
597 /**
598 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
599 * @phba: the adapter with the CQ
600 * @q: The Completion Queue that the host has completed processing for.
601 * @count: the number of elements that were consumed
602 * @arm: Indicates whether the host wants to arms this CQ.
603 *
604 * This routine will notify the HBA, by ringing the doorbell, that the
605 * CQEs have been processed. The @arm parameter specifies whether the
606 * queue should be rearmed when ringing the doorbell.
607 **/
608 void
lpfc_sli4_write_cq_db(struct lpfc_hba * phba,struct lpfc_queue * q,uint32_t count,bool arm)609 lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
610 uint32_t count, bool arm)
611 {
612 struct lpfc_register doorbell;
613
614 /* sanity check on queue memory */
615 if (unlikely(!q || (count == 0 && !arm)))
616 return;
617
618 /* ring doorbell for number popped */
619 doorbell.word0 = 0;
620 if (arm)
621 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
622 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
623 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
624 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
625 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
626 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
627 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
628 }
629
630 /**
631 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
632 * @phba: the adapter with the CQ
633 * @q: The Completion Queue that the host has completed processing for.
634 * @count: the number of elements that were consumed
635 * @arm: Indicates whether the host wants to arms this CQ.
636 *
637 * This routine will notify the HBA, by ringing the doorbell, that the
638 * CQEs have been processed. The @arm parameter specifies whether the
639 * queue should be rearmed when ringing the doorbell.
640 **/
641 void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba * phba,struct lpfc_queue * q,uint32_t count,bool arm)642 lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
643 uint32_t count, bool arm)
644 {
645 struct lpfc_register doorbell;
646
647 /* sanity check on queue memory */
648 if (unlikely(!q || (count == 0 && !arm)))
649 return;
650
651 /* ring doorbell for number popped */
652 doorbell.word0 = 0;
653 if (arm)
654 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
655 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
656 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
657 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
658 }
659
660 /*
661 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
662 *
663 * This routine will copy the contents of @wqe to the next available entry on
664 * the @q. This function will then ring the Receive Queue Doorbell to signal the
665 * HBA to start processing the Receive Queue Entry. This function returns the
666 * index that the rqe was copied to if successful. If no entries are available
667 * on @q then this function will return -ENOMEM.
668 * The caller is expected to hold the hbalock when calling this routine.
669 **/
670 int
lpfc_sli4_rq_put(struct lpfc_queue * hq,struct lpfc_queue * dq,struct lpfc_rqe * hrqe,struct lpfc_rqe * drqe)671 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
672 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
673 {
674 struct lpfc_rqe *temp_hrqe;
675 struct lpfc_rqe *temp_drqe;
676 struct lpfc_register doorbell;
677 int hq_put_index;
678 int dq_put_index;
679
680 /* sanity check on queue memory */
681 if (unlikely(!hq) || unlikely(!dq))
682 return -ENOMEM;
683 hq_put_index = hq->host_index;
684 dq_put_index = dq->host_index;
685 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
686 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
687
688 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
689 return -EINVAL;
690 if (hq_put_index != dq_put_index)
691 return -EINVAL;
692 /* If the host has not yet processed the next entry then we are done */
693 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
694 return -EBUSY;
695 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
696 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
697
698 /* Update the host index to point to the next slot */
699 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
700 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
701 hq->RQ_buf_posted++;
702
703 /* Ring The Header Receive Queue Doorbell */
704 if (!(hq->host_index % hq->notify_interval)) {
705 doorbell.word0 = 0;
706 if (hq->db_format == LPFC_DB_RING_FORMAT) {
707 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
708 hq->notify_interval);
709 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
710 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
711 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
712 hq->notify_interval);
713 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
714 hq->host_index);
715 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
716 } else {
717 return -EINVAL;
718 }
719 writel(doorbell.word0, hq->db_regaddr);
720 }
721 return hq_put_index;
722 }
723
724 /*
725 * lpfc_sli4_rq_release - Updates internal hba index for RQ
726 *
727 * This routine will update the HBA index of a queue to reflect consumption of
728 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
729 * consumed an entry the host calls this function to update the queue's
730 * internal pointers. This routine returns the number of entries that were
731 * consumed by the HBA.
732 **/
733 static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue * hq,struct lpfc_queue * dq)734 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
735 {
736 /* sanity check on queue memory */
737 if (unlikely(!hq) || unlikely(!dq))
738 return 0;
739
740 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
741 return 0;
742 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
743 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
744 return 1;
745 }
746
747 /**
748 * lpfc_cmd_iocb - Get next command iocb entry in the ring
749 * @phba: Pointer to HBA context object.
750 * @pring: Pointer to driver SLI ring object.
751 *
752 * This function returns pointer to next command iocb entry
753 * in the command ring. The caller must hold hbalock to prevent
754 * other threads consume the next command iocb.
755 * SLI-2/SLI-3 provide different sized iocbs.
756 **/
757 static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)758 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
759 {
760 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
761 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
762 }
763
764 /**
765 * lpfc_resp_iocb - Get next response iocb entry in the ring
766 * @phba: Pointer to HBA context object.
767 * @pring: Pointer to driver SLI ring object.
768 *
769 * This function returns pointer to next response iocb entry
770 * in the response ring. The caller must hold hbalock to make sure
771 * that no other thread consume the next response iocb.
772 * SLI-2/SLI-3 provide different sized iocbs.
773 **/
774 static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)775 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
776 {
777 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
778 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
779 }
780
781 /**
782 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
783 * @phba: Pointer to HBA context object.
784 *
785 * This function is called with hbalock held. This function
786 * allocates a new driver iocb object from the iocb pool. If the
787 * allocation is successful, it returns pointer to the newly
788 * allocated iocb object else it returns NULL.
789 **/
790 struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba * phba)791 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
792 {
793 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
794 struct lpfc_iocbq * iocbq = NULL;
795
796 lockdep_assert_held(&phba->hbalock);
797
798 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
799 if (iocbq)
800 phba->iocb_cnt++;
801 if (phba->iocb_cnt > phba->iocb_max)
802 phba->iocb_max = phba->iocb_cnt;
803 return iocbq;
804 }
805
806 /**
807 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
808 * @phba: Pointer to HBA context object.
809 * @xritag: XRI value.
810 *
811 * This function clears the sglq pointer from the array of acive
812 * sglq's. The xritag that is passed in is used to index into the
813 * array. Before the xritag can be used it needs to be adjusted
814 * by subtracting the xribase.
815 *
816 * Returns sglq ponter = success, NULL = Failure.
817 **/
818 struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba * phba,uint16_t xritag)819 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
820 {
821 struct lpfc_sglq *sglq;
822
823 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
824 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
825 return sglq;
826 }
827
828 /**
829 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
830 * @phba: Pointer to HBA context object.
831 * @xritag: XRI value.
832 *
833 * This function returns the sglq pointer from the array of acive
834 * sglq's. The xritag that is passed in is used to index into the
835 * array. Before the xritag can be used it needs to be adjusted
836 * by subtracting the xribase.
837 *
838 * Returns sglq ponter = success, NULL = Failure.
839 **/
840 struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba * phba,uint16_t xritag)841 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
842 {
843 struct lpfc_sglq *sglq;
844
845 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
846 return sglq;
847 }
848
849 /**
850 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
851 * @phba: Pointer to HBA context object.
852 * @xritag: xri used in this exchange.
853 * @rrq: The RRQ to be cleared.
854 *
855 **/
856 void
lpfc_clr_rrq_active(struct lpfc_hba * phba,uint16_t xritag,struct lpfc_node_rrq * rrq)857 lpfc_clr_rrq_active(struct lpfc_hba *phba,
858 uint16_t xritag,
859 struct lpfc_node_rrq *rrq)
860 {
861 struct lpfc_nodelist *ndlp = NULL;
862
863 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
864 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
865
866 /* The target DID could have been swapped (cable swap)
867 * we should use the ndlp from the findnode if it is
868 * available.
869 */
870 if ((!ndlp) && rrq->ndlp)
871 ndlp = rrq->ndlp;
872
873 if (!ndlp)
874 goto out;
875
876 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
877 rrq->send_rrq = 0;
878 rrq->xritag = 0;
879 rrq->rrq_stop_time = 0;
880 }
881 out:
882 mempool_free(rrq, phba->rrq_pool);
883 }
884
885 /**
886 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
887 * @phba: Pointer to HBA context object.
888 *
889 * This function is called with hbalock held. This function
890 * Checks if stop_time (ratov from setting rrq active) has
891 * been reached, if it has and the send_rrq flag is set then
892 * it will call lpfc_send_rrq. If the send_rrq flag is not set
893 * then it will just call the routine to clear the rrq and
894 * free the rrq resource.
895 * The timer is set to the next rrq that is going to expire before
896 * leaving the routine.
897 *
898 **/
899 void
lpfc_handle_rrq_active(struct lpfc_hba * phba)900 lpfc_handle_rrq_active(struct lpfc_hba *phba)
901 {
902 struct lpfc_node_rrq *rrq;
903 struct lpfc_node_rrq *nextrrq;
904 unsigned long next_time;
905 unsigned long iflags;
906 LIST_HEAD(send_rrq);
907
908 spin_lock_irqsave(&phba->hbalock, iflags);
909 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
910 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
911 list_for_each_entry_safe(rrq, nextrrq,
912 &phba->active_rrq_list, list) {
913 if (time_after(jiffies, rrq->rrq_stop_time))
914 list_move(&rrq->list, &send_rrq);
915 else if (time_before(rrq->rrq_stop_time, next_time))
916 next_time = rrq->rrq_stop_time;
917 }
918 spin_unlock_irqrestore(&phba->hbalock, iflags);
919 if ((!list_empty(&phba->active_rrq_list)) &&
920 (!(phba->pport->load_flag & FC_UNLOADING)))
921 mod_timer(&phba->rrq_tmr, next_time);
922 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
923 list_del(&rrq->list);
924 if (!rrq->send_rrq) {
925 /* this call will free the rrq */
926 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
927 } else if (lpfc_send_rrq(phba, rrq)) {
928 /* if we send the rrq then the completion handler
929 * will clear the bit in the xribitmap.
930 */
931 lpfc_clr_rrq_active(phba, rrq->xritag,
932 rrq);
933 }
934 }
935 }
936
937 /**
938 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
939 * @vport: Pointer to vport context object.
940 * @xri: The xri used in the exchange.
941 * @did: The targets DID for this exchange.
942 *
943 * returns NULL = rrq not found in the phba->active_rrq_list.
944 * rrq = rrq for this xri and target.
945 **/
946 struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport * vport,uint16_t xri,uint32_t did)947 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
948 {
949 struct lpfc_hba *phba = vport->phba;
950 struct lpfc_node_rrq *rrq;
951 struct lpfc_node_rrq *nextrrq;
952 unsigned long iflags;
953
954 if (phba->sli_rev != LPFC_SLI_REV4)
955 return NULL;
956 spin_lock_irqsave(&phba->hbalock, iflags);
957 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
958 if (rrq->vport == vport && rrq->xritag == xri &&
959 rrq->nlp_DID == did){
960 list_del(&rrq->list);
961 spin_unlock_irqrestore(&phba->hbalock, iflags);
962 return rrq;
963 }
964 }
965 spin_unlock_irqrestore(&phba->hbalock, iflags);
966 return NULL;
967 }
968
969 /**
970 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
971 * @vport: Pointer to vport context object.
972 * @ndlp: Pointer to the lpfc_node_list structure.
973 * If ndlp is NULL Remove all active RRQs for this vport from the
974 * phba->active_rrq_list and clear the rrq.
975 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
976 **/
977 void
lpfc_cleanup_vports_rrqs(struct lpfc_vport * vport,struct lpfc_nodelist * ndlp)978 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
979
980 {
981 struct lpfc_hba *phba = vport->phba;
982 struct lpfc_node_rrq *rrq;
983 struct lpfc_node_rrq *nextrrq;
984 unsigned long iflags;
985 LIST_HEAD(rrq_list);
986
987 if (phba->sli_rev != LPFC_SLI_REV4)
988 return;
989 if (!ndlp) {
990 lpfc_sli4_vport_delete_els_xri_aborted(vport);
991 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
992 }
993 spin_lock_irqsave(&phba->hbalock, iflags);
994 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
995 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
996 list_move(&rrq->list, &rrq_list);
997 spin_unlock_irqrestore(&phba->hbalock, iflags);
998
999 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1000 list_del(&rrq->list);
1001 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1002 }
1003 }
1004
1005 /**
1006 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
1007 * @phba: Pointer to HBA context object.
1008 * @ndlp: Targets nodelist pointer for this exchange.
1009 * @xritag: the xri in the bitmap to test.
1010 *
1011 * This function returns:
1012 * 0 = rrq not active for this xri
1013 * 1 = rrq is valid for this xri.
1014 **/
1015 int
lpfc_test_rrq_active(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,uint16_t xritag)1016 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1017 uint16_t xritag)
1018 {
1019 if (!ndlp)
1020 return 0;
1021 if (!ndlp->active_rrqs_xri_bitmap)
1022 return 0;
1023 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1024 return 1;
1025 else
1026 return 0;
1027 }
1028
1029 /**
1030 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
1031 * @phba: Pointer to HBA context object.
1032 * @ndlp: nodelist pointer for this target.
1033 * @xritag: xri used in this exchange.
1034 * @rxid: Remote Exchange ID.
1035 * @send_rrq: Flag used to determine if we should send rrq els cmd.
1036 *
1037 * This function takes the hbalock.
1038 * The active bit is always set in the active rrq xri_bitmap even
1039 * if there is no slot avaiable for the other rrq information.
1040 *
1041 * returns 0 rrq actived for this xri
1042 * < 0 No memory or invalid ndlp.
1043 **/
1044 int
lpfc_set_rrq_active(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,uint16_t xritag,uint16_t rxid,uint16_t send_rrq)1045 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1046 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1047 {
1048 unsigned long iflags;
1049 struct lpfc_node_rrq *rrq;
1050 int empty;
1051
1052 if (!ndlp)
1053 return -EINVAL;
1054
1055 if (!phba->cfg_enable_rrq)
1056 return -EINVAL;
1057
1058 spin_lock_irqsave(&phba->hbalock, iflags);
1059 if (phba->pport->load_flag & FC_UNLOADING) {
1060 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1061 goto out;
1062 }
1063
1064 /*
1065 * set the active bit even if there is no mem available.
1066 */
1067 if (NLP_CHK_FREE_REQ(ndlp))
1068 goto out;
1069
1070 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1071 goto out;
1072
1073 if (!ndlp->active_rrqs_xri_bitmap)
1074 goto out;
1075
1076 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1077 goto out;
1078
1079 spin_unlock_irqrestore(&phba->hbalock, iflags);
1080 rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
1081 if (!rrq) {
1082 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1083 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1084 " DID:0x%x Send:%d\n",
1085 xritag, rxid, ndlp->nlp_DID, send_rrq);
1086 return -EINVAL;
1087 }
1088 if (phba->cfg_enable_rrq == 1)
1089 rrq->send_rrq = send_rrq;
1090 else
1091 rrq->send_rrq = 0;
1092 rrq->xritag = xritag;
1093 rrq->rrq_stop_time = jiffies +
1094 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1095 rrq->ndlp = ndlp;
1096 rrq->nlp_DID = ndlp->nlp_DID;
1097 rrq->vport = ndlp->vport;
1098 rrq->rxid = rxid;
1099 spin_lock_irqsave(&phba->hbalock, iflags);
1100 empty = list_empty(&phba->active_rrq_list);
1101 list_add_tail(&rrq->list, &phba->active_rrq_list);
1102 phba->hba_flag |= HBA_RRQ_ACTIVE;
1103 if (empty)
1104 lpfc_worker_wake_up(phba);
1105 spin_unlock_irqrestore(&phba->hbalock, iflags);
1106 return 0;
1107 out:
1108 spin_unlock_irqrestore(&phba->hbalock, iflags);
1109 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1110 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1111 " DID:0x%x Send:%d\n",
1112 xritag, rxid, ndlp->nlp_DID, send_rrq);
1113 return -EINVAL;
1114 }
1115
1116 /**
1117 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
1118 * @phba: Pointer to HBA context object.
1119 * @piocbq: Pointer to the iocbq.
1120 *
1121 * The driver calls this function with either the nvme ls ring lock
1122 * or the fc els ring lock held depending on the iocb usage. This function
1123 * gets a new driver sglq object from the sglq list. If the list is not empty
1124 * then it is successful, it returns pointer to the newly allocated sglq
1125 * object else it returns NULL.
1126 **/
1127 static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba * phba,struct lpfc_iocbq * piocbq)1128 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1129 {
1130 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1131 struct lpfc_sglq *sglq = NULL;
1132 struct lpfc_sglq *start_sglq = NULL;
1133 struct lpfc_io_buf *lpfc_cmd;
1134 struct lpfc_nodelist *ndlp;
1135 struct lpfc_sli_ring *pring = NULL;
1136 int found = 0;
1137
1138 if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1139 pring = phba->sli4_hba.nvmels_wq->pring;
1140 else
1141 pring = lpfc_phba_elsring(phba);
1142
1143 lockdep_assert_held(&pring->ring_lock);
1144
1145 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1146 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1147 ndlp = lpfc_cmd->rdata->pnode;
1148 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
1149 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
1150 ndlp = piocbq->context_un.ndlp;
1151 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1152 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1153 ndlp = NULL;
1154 else
1155 ndlp = piocbq->context_un.ndlp;
1156 } else {
1157 ndlp = piocbq->context1;
1158 }
1159
1160 spin_lock(&phba->sli4_hba.sgl_list_lock);
1161 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1162 start_sglq = sglq;
1163 while (!found) {
1164 if (!sglq)
1165 break;
1166 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1167 test_bit(sglq->sli4_lxritag,
1168 ndlp->active_rrqs_xri_bitmap)) {
1169 /* This xri has an rrq outstanding for this DID.
1170 * put it back in the list and get another xri.
1171 */
1172 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1173 sglq = NULL;
1174 list_remove_head(lpfc_els_sgl_list, sglq,
1175 struct lpfc_sglq, list);
1176 if (sglq == start_sglq) {
1177 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1178 sglq = NULL;
1179 break;
1180 } else
1181 continue;
1182 }
1183 sglq->ndlp = ndlp;
1184 found = 1;
1185 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1186 sglq->state = SGL_ALLOCATED;
1187 }
1188 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1189 return sglq;
1190 }
1191
1192 /**
1193 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1194 * @phba: Pointer to HBA context object.
1195 * @piocbq: Pointer to the iocbq.
1196 *
1197 * This function is called with the sgl_list lock held. This function
1198 * gets a new driver sglq object from the sglq list. If the
1199 * list is not empty then it is successful, it returns pointer to the newly
1200 * allocated sglq object else it returns NULL.
1201 **/
1202 struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba * phba,struct lpfc_iocbq * piocbq)1203 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1204 {
1205 struct list_head *lpfc_nvmet_sgl_list;
1206 struct lpfc_sglq *sglq = NULL;
1207
1208 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1209
1210 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1211
1212 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1213 if (!sglq)
1214 return NULL;
1215 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1216 sglq->state = SGL_ALLOCATED;
1217 return sglq;
1218 }
1219
1220 /**
1221 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1222 * @phba: Pointer to HBA context object.
1223 *
1224 * This function is called with no lock held. This function
1225 * allocates a new driver iocb object from the iocb pool. If the
1226 * allocation is successful, it returns pointer to the newly
1227 * allocated iocb object else it returns NULL.
1228 **/
1229 struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba * phba)1230 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1231 {
1232 struct lpfc_iocbq * iocbq = NULL;
1233 unsigned long iflags;
1234
1235 spin_lock_irqsave(&phba->hbalock, iflags);
1236 iocbq = __lpfc_sli_get_iocbq(phba);
1237 spin_unlock_irqrestore(&phba->hbalock, iflags);
1238 return iocbq;
1239 }
1240
1241 /**
1242 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1243 * @phba: Pointer to HBA context object.
1244 * @iocbq: Pointer to driver iocb object.
1245 *
1246 * This function is called to release the driver iocb object
1247 * to the iocb pool. The iotag in the iocb object
1248 * does not change for each use of the iocb object. This function
1249 * clears all other fields of the iocb object when it is freed.
1250 * The sqlq structure that holds the xritag and phys and virtual
1251 * mappings for the scatter gather list is retrieved from the
1252 * active array of sglq. The get of the sglq pointer also clears
1253 * the entry in the array. If the status of the IO indiactes that
1254 * this IO was aborted then the sglq entry it put on the
1255 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1256 * IO has good status or fails for any other reason then the sglq
1257 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1258 * asserted held in the code path calling this routine.
1259 **/
1260 static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1261 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1262 {
1263 struct lpfc_sglq *sglq;
1264 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1265 unsigned long iflag = 0;
1266 struct lpfc_sli_ring *pring;
1267
1268 if (iocbq->sli4_xritag == NO_XRI)
1269 sglq = NULL;
1270 else
1271 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1272
1273
1274 if (sglq) {
1275 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1276 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1277 iflag);
1278 sglq->state = SGL_FREED;
1279 sglq->ndlp = NULL;
1280 list_add_tail(&sglq->list,
1281 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1282 spin_unlock_irqrestore(
1283 &phba->sli4_hba.sgl_list_lock, iflag);
1284 goto out;
1285 }
1286
1287 pring = phba->sli4_hba.els_wq->pring;
1288 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1289 (sglq->state != SGL_XRI_ABORTED)) {
1290 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1291 iflag);
1292 list_add(&sglq->list,
1293 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1294 spin_unlock_irqrestore(
1295 &phba->sli4_hba.sgl_list_lock, iflag);
1296 } else {
1297 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1298 iflag);
1299 sglq->state = SGL_FREED;
1300 sglq->ndlp = NULL;
1301 list_add_tail(&sglq->list,
1302 &phba->sli4_hba.lpfc_els_sgl_list);
1303 spin_unlock_irqrestore(
1304 &phba->sli4_hba.sgl_list_lock, iflag);
1305
1306 /* Check if TXQ queue needs to be serviced */
1307 if (!list_empty(&pring->txq))
1308 lpfc_worker_wake_up(phba);
1309 }
1310 }
1311
1312 out:
1313 /*
1314 * Clean all volatile data fields, preserve iotag and node struct.
1315 */
1316 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1317 iocbq->sli4_lxritag = NO_XRI;
1318 iocbq->sli4_xritag = NO_XRI;
1319 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1320 LPFC_IO_NVME_LS);
1321 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1322 }
1323
1324
1325 /**
1326 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1327 * @phba: Pointer to HBA context object.
1328 * @iocbq: Pointer to driver iocb object.
1329 *
1330 * This function is called to release the driver iocb object to the
1331 * iocb pool. The iotag in the iocb object does not change for each
1332 * use of the iocb object. This function clears all other fields of
1333 * the iocb object when it is freed. The hbalock is asserted held in
1334 * the code path calling this routine.
1335 **/
1336 static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1337 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1338 {
1339 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1340
1341 /*
1342 * Clean all volatile data fields, preserve iotag and node struct.
1343 */
1344 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1345 iocbq->sli4_xritag = NO_XRI;
1346 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1347 }
1348
1349 /**
1350 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1351 * @phba: Pointer to HBA context object.
1352 * @iocbq: Pointer to driver iocb object.
1353 *
1354 * This function is called with hbalock held to release driver
1355 * iocb object to the iocb pool. The iotag in the iocb object
1356 * does not change for each use of the iocb object. This function
1357 * clears all other fields of the iocb object when it is freed.
1358 **/
1359 static void
__lpfc_sli_release_iocbq(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1360 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1361 {
1362 lockdep_assert_held(&phba->hbalock);
1363
1364 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1365 phba->iocb_cnt--;
1366 }
1367
1368 /**
1369 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1370 * @phba: Pointer to HBA context object.
1371 * @iocbq: Pointer to driver iocb object.
1372 *
1373 * This function is called with no lock held to release the iocb to
1374 * iocb pool.
1375 **/
1376 void
lpfc_sli_release_iocbq(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1377 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1378 {
1379 unsigned long iflags;
1380
1381 /*
1382 * Clean all volatile data fields, preserve iotag and node struct.
1383 */
1384 spin_lock_irqsave(&phba->hbalock, iflags);
1385 __lpfc_sli_release_iocbq(phba, iocbq);
1386 spin_unlock_irqrestore(&phba->hbalock, iflags);
1387 }
1388
1389 /**
1390 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1391 * @phba: Pointer to HBA context object.
1392 * @iocblist: List of IOCBs.
1393 * @ulpstatus: ULP status in IOCB command field.
1394 * @ulpWord4: ULP word-4 in IOCB command field.
1395 *
1396 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1397 * on the list by invoking the complete callback function associated with the
1398 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1399 * fields.
1400 **/
1401 void
lpfc_sli_cancel_iocbs(struct lpfc_hba * phba,struct list_head * iocblist,uint32_t ulpstatus,uint32_t ulpWord4)1402 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1403 uint32_t ulpstatus, uint32_t ulpWord4)
1404 {
1405 struct lpfc_iocbq *piocb;
1406
1407 while (!list_empty(iocblist)) {
1408 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1409 if (!piocb->iocb_cmpl) {
1410 if (piocb->iocb_flag & LPFC_IO_NVME)
1411 lpfc_nvme_cancel_iocb(phba, piocb);
1412 else
1413 lpfc_sli_release_iocbq(phba, piocb);
1414 } else {
1415 piocb->iocb.ulpStatus = ulpstatus;
1416 piocb->iocb.un.ulpWord[4] = ulpWord4;
1417 (piocb->iocb_cmpl) (phba, piocb, piocb);
1418 }
1419 }
1420 return;
1421 }
1422
1423 /**
1424 * lpfc_sli_iocb_cmd_type - Get the iocb type
1425 * @iocb_cmnd: iocb command code.
1426 *
1427 * This function is called by ring event handler function to get the iocb type.
1428 * This function translates the iocb command to an iocb command type used to
1429 * decide the final disposition of each completed IOCB.
1430 * The function returns
1431 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1432 * LPFC_SOL_IOCB if it is a solicited iocb completion
1433 * LPFC_ABORT_IOCB if it is an abort iocb
1434 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1435 *
1436 * The caller is not required to hold any lock.
1437 **/
1438 static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)1439 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1440 {
1441 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1442
1443 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1444 return 0;
1445
1446 switch (iocb_cmnd) {
1447 case CMD_XMIT_SEQUENCE_CR:
1448 case CMD_XMIT_SEQUENCE_CX:
1449 case CMD_XMIT_BCAST_CN:
1450 case CMD_XMIT_BCAST_CX:
1451 case CMD_ELS_REQUEST_CR:
1452 case CMD_ELS_REQUEST_CX:
1453 case CMD_CREATE_XRI_CR:
1454 case CMD_CREATE_XRI_CX:
1455 case CMD_GET_RPI_CN:
1456 case CMD_XMIT_ELS_RSP_CX:
1457 case CMD_GET_RPI_CR:
1458 case CMD_FCP_IWRITE_CR:
1459 case CMD_FCP_IWRITE_CX:
1460 case CMD_FCP_IREAD_CR:
1461 case CMD_FCP_IREAD_CX:
1462 case CMD_FCP_ICMND_CR:
1463 case CMD_FCP_ICMND_CX:
1464 case CMD_FCP_TSEND_CX:
1465 case CMD_FCP_TRSP_CX:
1466 case CMD_FCP_TRECEIVE_CX:
1467 case CMD_FCP_AUTO_TRSP_CX:
1468 case CMD_ADAPTER_MSG:
1469 case CMD_ADAPTER_DUMP:
1470 case CMD_XMIT_SEQUENCE64_CR:
1471 case CMD_XMIT_SEQUENCE64_CX:
1472 case CMD_XMIT_BCAST64_CN:
1473 case CMD_XMIT_BCAST64_CX:
1474 case CMD_ELS_REQUEST64_CR:
1475 case CMD_ELS_REQUEST64_CX:
1476 case CMD_FCP_IWRITE64_CR:
1477 case CMD_FCP_IWRITE64_CX:
1478 case CMD_FCP_IREAD64_CR:
1479 case CMD_FCP_IREAD64_CX:
1480 case CMD_FCP_ICMND64_CR:
1481 case CMD_FCP_ICMND64_CX:
1482 case CMD_FCP_TSEND64_CX:
1483 case CMD_FCP_TRSP64_CX:
1484 case CMD_FCP_TRECEIVE64_CX:
1485 case CMD_GEN_REQUEST64_CR:
1486 case CMD_GEN_REQUEST64_CX:
1487 case CMD_XMIT_ELS_RSP64_CX:
1488 case DSSCMD_IWRITE64_CR:
1489 case DSSCMD_IWRITE64_CX:
1490 case DSSCMD_IREAD64_CR:
1491 case DSSCMD_IREAD64_CX:
1492 case CMD_SEND_FRAME:
1493 type = LPFC_SOL_IOCB;
1494 break;
1495 case CMD_ABORT_XRI_CN:
1496 case CMD_ABORT_XRI_CX:
1497 case CMD_CLOSE_XRI_CN:
1498 case CMD_CLOSE_XRI_CX:
1499 case CMD_XRI_ABORTED_CX:
1500 case CMD_ABORT_MXRI64_CN:
1501 case CMD_XMIT_BLS_RSP64_CX:
1502 type = LPFC_ABORT_IOCB;
1503 break;
1504 case CMD_RCV_SEQUENCE_CX:
1505 case CMD_RCV_ELS_REQ_CX:
1506 case CMD_RCV_SEQUENCE64_CX:
1507 case CMD_RCV_ELS_REQ64_CX:
1508 case CMD_ASYNC_STATUS:
1509 case CMD_IOCB_RCV_SEQ64_CX:
1510 case CMD_IOCB_RCV_ELS64_CX:
1511 case CMD_IOCB_RCV_CONT64_CX:
1512 case CMD_IOCB_RET_XRI64_CX:
1513 type = LPFC_UNSOL_IOCB;
1514 break;
1515 case CMD_IOCB_XMIT_MSEQ64_CR:
1516 case CMD_IOCB_XMIT_MSEQ64_CX:
1517 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1518 case CMD_IOCB_RCV_ELS_LIST64_CX:
1519 case CMD_IOCB_CLOSE_EXTENDED_CN:
1520 case CMD_IOCB_ABORT_EXTENDED_CN:
1521 case CMD_IOCB_RET_HBQE64_CN:
1522 case CMD_IOCB_FCP_IBIDIR64_CR:
1523 case CMD_IOCB_FCP_IBIDIR64_CX:
1524 case CMD_IOCB_FCP_ITASKMGT64_CX:
1525 case CMD_IOCB_LOGENTRY_CN:
1526 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1527 printk("%s - Unhandled SLI-3 Command x%x\n",
1528 __func__, iocb_cmnd);
1529 type = LPFC_UNKNOWN_IOCB;
1530 break;
1531 default:
1532 type = LPFC_UNKNOWN_IOCB;
1533 break;
1534 }
1535
1536 return type;
1537 }
1538
1539 /**
1540 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1541 * @phba: Pointer to HBA context object.
1542 *
1543 * This function is called from SLI initialization code
1544 * to configure every ring of the HBA's SLI interface. The
1545 * caller is not required to hold any lock. This function issues
1546 * a config_ring mailbox command for each ring.
1547 * This function returns zero if successful else returns a negative
1548 * error code.
1549 **/
1550 static int
lpfc_sli_ring_map(struct lpfc_hba * phba)1551 lpfc_sli_ring_map(struct lpfc_hba *phba)
1552 {
1553 struct lpfc_sli *psli = &phba->sli;
1554 LPFC_MBOXQ_t *pmb;
1555 MAILBOX_t *pmbox;
1556 int i, rc, ret = 0;
1557
1558 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1559 if (!pmb)
1560 return -ENOMEM;
1561 pmbox = &pmb->u.mb;
1562 phba->link_state = LPFC_INIT_MBX_CMDS;
1563 for (i = 0; i < psli->num_rings; i++) {
1564 lpfc_config_ring(phba, i, pmb);
1565 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1566 if (rc != MBX_SUCCESS) {
1567 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1568 "0446 Adapter failed to init (%d), "
1569 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1570 "ring %d\n",
1571 rc, pmbox->mbxCommand,
1572 pmbox->mbxStatus, i);
1573 phba->link_state = LPFC_HBA_ERROR;
1574 ret = -ENXIO;
1575 break;
1576 }
1577 }
1578 mempool_free(pmb, phba->mbox_mem_pool);
1579 return ret;
1580 }
1581
1582 /**
1583 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1584 * @phba: Pointer to HBA context object.
1585 * @pring: Pointer to driver SLI ring object.
1586 * @piocb: Pointer to the driver iocb object.
1587 *
1588 * The driver calls this function with the hbalock held for SLI3 ports or
1589 * the ring lock held for SLI4 ports. The function adds the
1590 * new iocb to txcmplq of the given ring. This function always returns
1591 * 0. If this function is called for ELS ring, this function checks if
1592 * there is a vport associated with the ELS command. This function also
1593 * starts els_tmofunc timer if this is an ELS command.
1594 **/
1595 static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * piocb)1596 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1597 struct lpfc_iocbq *piocb)
1598 {
1599 if (phba->sli_rev == LPFC_SLI_REV4)
1600 lockdep_assert_held(&pring->ring_lock);
1601 else
1602 lockdep_assert_held(&phba->hbalock);
1603
1604 BUG_ON(!piocb);
1605
1606 list_add_tail(&piocb->list, &pring->txcmplq);
1607 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1608 pring->txcmplq_cnt++;
1609
1610 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1611 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1612 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1613 BUG_ON(!piocb->vport);
1614 if (!(piocb->vport->load_flag & FC_UNLOADING))
1615 mod_timer(&piocb->vport->els_tmofunc,
1616 jiffies +
1617 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1618 }
1619
1620 return 0;
1621 }
1622
1623 /**
1624 * lpfc_sli_ringtx_get - Get first element of the txq
1625 * @phba: Pointer to HBA context object.
1626 * @pring: Pointer to driver SLI ring object.
1627 *
1628 * This function is called with hbalock held to get next
1629 * iocb in txq of the given ring. If there is any iocb in
1630 * the txq, the function returns first iocb in the list after
1631 * removing the iocb from the list, else it returns NULL.
1632 **/
1633 struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)1634 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1635 {
1636 struct lpfc_iocbq *cmd_iocb;
1637
1638 lockdep_assert_held(&phba->hbalock);
1639
1640 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1641 return cmd_iocb;
1642 }
1643
1644 /**
1645 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1646 * @phba: Pointer to HBA context object.
1647 * @pring: Pointer to driver SLI ring object.
1648 *
1649 * This function is called with hbalock held and the caller must post the
1650 * iocb without releasing the lock. If the caller releases the lock,
1651 * iocb slot returned by the function is not guaranteed to be available.
1652 * The function returns pointer to the next available iocb slot if there
1653 * is available slot in the ring, else it returns NULL.
1654 * If the get index of the ring is ahead of the put index, the function
1655 * will post an error attention event to the worker thread to take the
1656 * HBA to offline state.
1657 **/
1658 static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)1659 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1660 {
1661 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1662 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1663
1664 lockdep_assert_held(&phba->hbalock);
1665
1666 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1667 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1668 pring->sli.sli3.next_cmdidx = 0;
1669
1670 if (unlikely(pring->sli.sli3.local_getidx ==
1671 pring->sli.sli3.next_cmdidx)) {
1672
1673 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1674
1675 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1676 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1677 "0315 Ring %d issue: portCmdGet %d "
1678 "is bigger than cmd ring %d\n",
1679 pring->ringno,
1680 pring->sli.sli3.local_getidx,
1681 max_cmd_idx);
1682
1683 phba->link_state = LPFC_HBA_ERROR;
1684 /*
1685 * All error attention handlers are posted to
1686 * worker thread
1687 */
1688 phba->work_ha |= HA_ERATT;
1689 phba->work_hs = HS_FFER3;
1690
1691 lpfc_worker_wake_up(phba);
1692
1693 return NULL;
1694 }
1695
1696 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1697 return NULL;
1698 }
1699
1700 return lpfc_cmd_iocb(phba, pring);
1701 }
1702
1703 /**
1704 * lpfc_sli_next_iotag - Get an iotag for the iocb
1705 * @phba: Pointer to HBA context object.
1706 * @iocbq: Pointer to driver iocb object.
1707 *
1708 * This function gets an iotag for the iocb. If there is no unused iotag and
1709 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1710 * array and assigns a new iotag.
1711 * The function returns the allocated iotag if successful, else returns zero.
1712 * Zero is not a valid iotag.
1713 * The caller is not required to hold any lock.
1714 **/
1715 uint16_t
lpfc_sli_next_iotag(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)1716 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1717 {
1718 struct lpfc_iocbq **new_arr;
1719 struct lpfc_iocbq **old_arr;
1720 size_t new_len;
1721 struct lpfc_sli *psli = &phba->sli;
1722 uint16_t iotag;
1723
1724 spin_lock_irq(&phba->hbalock);
1725 iotag = psli->last_iotag;
1726 if(++iotag < psli->iocbq_lookup_len) {
1727 psli->last_iotag = iotag;
1728 psli->iocbq_lookup[iotag] = iocbq;
1729 spin_unlock_irq(&phba->hbalock);
1730 iocbq->iotag = iotag;
1731 return iotag;
1732 } else if (psli->iocbq_lookup_len < (0xffff
1733 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1734 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1735 spin_unlock_irq(&phba->hbalock);
1736 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1737 GFP_KERNEL);
1738 if (new_arr) {
1739 spin_lock_irq(&phba->hbalock);
1740 old_arr = psli->iocbq_lookup;
1741 if (new_len <= psli->iocbq_lookup_len) {
1742 /* highly unprobable case */
1743 kfree(new_arr);
1744 iotag = psli->last_iotag;
1745 if(++iotag < psli->iocbq_lookup_len) {
1746 psli->last_iotag = iotag;
1747 psli->iocbq_lookup[iotag] = iocbq;
1748 spin_unlock_irq(&phba->hbalock);
1749 iocbq->iotag = iotag;
1750 return iotag;
1751 }
1752 spin_unlock_irq(&phba->hbalock);
1753 return 0;
1754 }
1755 if (psli->iocbq_lookup)
1756 memcpy(new_arr, old_arr,
1757 ((psli->last_iotag + 1) *
1758 sizeof (struct lpfc_iocbq *)));
1759 psli->iocbq_lookup = new_arr;
1760 psli->iocbq_lookup_len = new_len;
1761 psli->last_iotag = iotag;
1762 psli->iocbq_lookup[iotag] = iocbq;
1763 spin_unlock_irq(&phba->hbalock);
1764 iocbq->iotag = iotag;
1765 kfree(old_arr);
1766 return iotag;
1767 }
1768 } else
1769 spin_unlock_irq(&phba->hbalock);
1770
1771 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1772 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1773 psli->last_iotag);
1774
1775 return 0;
1776 }
1777
1778 /**
1779 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1780 * @phba: Pointer to HBA context object.
1781 * @pring: Pointer to driver SLI ring object.
1782 * @iocb: Pointer to iocb slot in the ring.
1783 * @nextiocb: Pointer to driver iocb object which need to be
1784 * posted to firmware.
1785 *
1786 * This function is called to post a new iocb to the firmware. This
1787 * function copies the new iocb to ring iocb slot and updates the
1788 * ring pointers. It adds the new iocb to txcmplq if there is
1789 * a completion call back for this iocb else the function will free the
1790 * iocb object. The hbalock is asserted held in the code path calling
1791 * this routine.
1792 **/
1793 static void
lpfc_sli_submit_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,IOCB_t * iocb,struct lpfc_iocbq * nextiocb)1794 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1795 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1796 {
1797 /*
1798 * Set up an iotag
1799 */
1800 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1801
1802
1803 if (pring->ringno == LPFC_ELS_RING) {
1804 lpfc_debugfs_slow_ring_trc(phba,
1805 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1806 *(((uint32_t *) &nextiocb->iocb) + 4),
1807 *(((uint32_t *) &nextiocb->iocb) + 6),
1808 *(((uint32_t *) &nextiocb->iocb) + 7));
1809 }
1810
1811 /*
1812 * Issue iocb command to adapter
1813 */
1814 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1815 wmb();
1816 pring->stats.iocb_cmd++;
1817
1818 /*
1819 * If there is no completion routine to call, we can release the
1820 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1821 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1822 */
1823 if (nextiocb->iocb_cmpl)
1824 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1825 else
1826 __lpfc_sli_release_iocbq(phba, nextiocb);
1827
1828 /*
1829 * Let the HBA know what IOCB slot will be the next one the
1830 * driver will put a command into.
1831 */
1832 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1833 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1834 }
1835
1836 /**
1837 * lpfc_sli_update_full_ring - Update the chip attention register
1838 * @phba: Pointer to HBA context object.
1839 * @pring: Pointer to driver SLI ring object.
1840 *
1841 * The caller is not required to hold any lock for calling this function.
1842 * This function updates the chip attention bits for the ring to inform firmware
1843 * that there are pending work to be done for this ring and requests an
1844 * interrupt when there is space available in the ring. This function is
1845 * called when the driver is unable to post more iocbs to the ring due
1846 * to unavailability of space in the ring.
1847 **/
1848 static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)1849 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1850 {
1851 int ringno = pring->ringno;
1852
1853 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1854
1855 wmb();
1856
1857 /*
1858 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1859 * The HBA will tell us when an IOCB entry is available.
1860 */
1861 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1862 readl(phba->CAregaddr); /* flush */
1863
1864 pring->stats.iocb_cmd_full++;
1865 }
1866
1867 /**
1868 * lpfc_sli_update_ring - Update chip attention register
1869 * @phba: Pointer to HBA context object.
1870 * @pring: Pointer to driver SLI ring object.
1871 *
1872 * This function updates the chip attention register bit for the
1873 * given ring to inform HBA that there is more work to be done
1874 * in this ring. The caller is not required to hold any lock.
1875 **/
1876 static void
lpfc_sli_update_ring(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)1877 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1878 {
1879 int ringno = pring->ringno;
1880
1881 /*
1882 * Tell the HBA that there is work to do in this ring.
1883 */
1884 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1885 wmb();
1886 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1887 readl(phba->CAregaddr); /* flush */
1888 }
1889 }
1890
1891 /**
1892 * lpfc_sli_resume_iocb - Process iocbs in the txq
1893 * @phba: Pointer to HBA context object.
1894 * @pring: Pointer to driver SLI ring object.
1895 *
1896 * This function is called with hbalock held to post pending iocbs
1897 * in the txq to the firmware. This function is called when driver
1898 * detects space available in the ring.
1899 **/
1900 static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)1901 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1902 {
1903 IOCB_t *iocb;
1904 struct lpfc_iocbq *nextiocb;
1905
1906 lockdep_assert_held(&phba->hbalock);
1907
1908 /*
1909 * Check to see if:
1910 * (a) there is anything on the txq to send
1911 * (b) link is up
1912 * (c) link attention events can be processed (fcp ring only)
1913 * (d) IOCB processing is not blocked by the outstanding mbox command.
1914 */
1915
1916 if (lpfc_is_link_up(phba) &&
1917 (!list_empty(&pring->txq)) &&
1918 (pring->ringno != LPFC_FCP_RING ||
1919 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1920
1921 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1922 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1923 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1924
1925 if (iocb)
1926 lpfc_sli_update_ring(phba, pring);
1927 else
1928 lpfc_sli_update_full_ring(phba, pring);
1929 }
1930
1931 return;
1932 }
1933
1934 /**
1935 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1936 * @phba: Pointer to HBA context object.
1937 * @hbqno: HBQ number.
1938 *
1939 * This function is called with hbalock held to get the next
1940 * available slot for the given HBQ. If there is free slot
1941 * available for the HBQ it will return pointer to the next available
1942 * HBQ entry else it will return NULL.
1943 **/
1944 static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba * phba,uint32_t hbqno)1945 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1946 {
1947 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1948
1949 lockdep_assert_held(&phba->hbalock);
1950
1951 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1952 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1953 hbqp->next_hbqPutIdx = 0;
1954
1955 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1956 uint32_t raw_index = phba->hbq_get[hbqno];
1957 uint32_t getidx = le32_to_cpu(raw_index);
1958
1959 hbqp->local_hbqGetIdx = getidx;
1960
1961 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1963 "1802 HBQ %d: local_hbqGetIdx "
1964 "%u is > than hbqp->entry_count %u\n",
1965 hbqno, hbqp->local_hbqGetIdx,
1966 hbqp->entry_count);
1967
1968 phba->link_state = LPFC_HBA_ERROR;
1969 return NULL;
1970 }
1971
1972 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1973 return NULL;
1974 }
1975
1976 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1977 hbqp->hbqPutIdx;
1978 }
1979
1980 /**
1981 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1982 * @phba: Pointer to HBA context object.
1983 *
1984 * This function is called with no lock held to free all the
1985 * hbq buffers while uninitializing the SLI interface. It also
1986 * frees the HBQ buffers returned by the firmware but not yet
1987 * processed by the upper layers.
1988 **/
1989 void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba * phba)1990 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1991 {
1992 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1993 struct hbq_dmabuf *hbq_buf;
1994 unsigned long flags;
1995 int i, hbq_count;
1996
1997 hbq_count = lpfc_sli_hbq_count();
1998 /* Return all memory used by all HBQs */
1999 spin_lock_irqsave(&phba->hbalock, flags);
2000 for (i = 0; i < hbq_count; ++i) {
2001 list_for_each_entry_safe(dmabuf, next_dmabuf,
2002 &phba->hbqs[i].hbq_buffer_list, list) {
2003 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2004 list_del(&hbq_buf->dbuf.list);
2005 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2006 }
2007 phba->hbqs[i].buffer_count = 0;
2008 }
2009
2010 /* Mark the HBQs not in use */
2011 phba->hbq_in_use = 0;
2012 spin_unlock_irqrestore(&phba->hbalock, flags);
2013 }
2014
2015 /**
2016 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2017 * @phba: Pointer to HBA context object.
2018 * @hbqno: HBQ number.
2019 * @hbq_buf: Pointer to HBQ buffer.
2020 *
2021 * This function is called with the hbalock held to post a
2022 * hbq buffer to the firmware. If the function finds an empty
2023 * slot in the HBQ, it will post the buffer. The function will return
2024 * pointer to the hbq entry if it successfully post the buffer
2025 * else it will return NULL.
2026 **/
2027 static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba * phba,uint32_t hbqno,struct hbq_dmabuf * hbq_buf)2028 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2029 struct hbq_dmabuf *hbq_buf)
2030 {
2031 lockdep_assert_held(&phba->hbalock);
2032 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2033 }
2034
2035 /**
2036 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2037 * @phba: Pointer to HBA context object.
2038 * @hbqno: HBQ number.
2039 * @hbq_buf: Pointer to HBQ buffer.
2040 *
2041 * This function is called with the hbalock held to post a hbq buffer to the
2042 * firmware. If the function finds an empty slot in the HBQ, it will post the
2043 * buffer and place it on the hbq_buffer_list. The function will return zero if
2044 * it successfully post the buffer else it will return an error.
2045 **/
2046 static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba * phba,uint32_t hbqno,struct hbq_dmabuf * hbq_buf)2047 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2048 struct hbq_dmabuf *hbq_buf)
2049 {
2050 struct lpfc_hbq_entry *hbqe;
2051 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2052
2053 lockdep_assert_held(&phba->hbalock);
2054 /* Get next HBQ entry slot to use */
2055 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2056 if (hbqe) {
2057 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2058
2059 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2060 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2061 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2062 hbqe->bde.tus.f.bdeFlags = 0;
2063 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2064 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2065 /* Sync SLIM */
2066 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2067 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2068 /* flush */
2069 readl(phba->hbq_put + hbqno);
2070 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2071 return 0;
2072 } else
2073 return -ENOMEM;
2074 }
2075
2076 /**
2077 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2078 * @phba: Pointer to HBA context object.
2079 * @hbqno: HBQ number.
2080 * @hbq_buf: Pointer to HBQ buffer.
2081 *
2082 * This function is called with the hbalock held to post an RQE to the SLI4
2083 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2084 * the hbq_buffer_list and return zero, otherwise it will return an error.
2085 **/
2086 static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba * phba,uint32_t hbqno,struct hbq_dmabuf * hbq_buf)2087 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2088 struct hbq_dmabuf *hbq_buf)
2089 {
2090 int rc;
2091 struct lpfc_rqe hrqe;
2092 struct lpfc_rqe drqe;
2093 struct lpfc_queue *hrq;
2094 struct lpfc_queue *drq;
2095
2096 if (hbqno != LPFC_ELS_HBQ)
2097 return 1;
2098 hrq = phba->sli4_hba.hdr_rq;
2099 drq = phba->sli4_hba.dat_rq;
2100
2101 lockdep_assert_held(&phba->hbalock);
2102 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2103 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2104 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2105 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2106 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2107 if (rc < 0)
2108 return rc;
2109 hbq_buf->tag = (rc | (hbqno << 16));
2110 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2111 return 0;
2112 }
2113
2114 /* HBQ for ELS and CT traffic. */
2115 static struct lpfc_hbq_init lpfc_els_hbq = {
2116 .rn = 1,
2117 .entry_count = 256,
2118 .mask_count = 0,
2119 .profile = 0,
2120 .ring_mask = (1 << LPFC_ELS_RING),
2121 .buffer_count = 0,
2122 .init_count = 40,
2123 .add_count = 40,
2124 };
2125
2126 /* Array of HBQs */
2127 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2128 &lpfc_els_hbq,
2129 };
2130
2131 /**
2132 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2133 * @phba: Pointer to HBA context object.
2134 * @hbqno: HBQ number.
2135 * @count: Number of HBQ buffers to be posted.
2136 *
2137 * This function is called with no lock held to post more hbq buffers to the
2138 * given HBQ. The function returns the number of HBQ buffers successfully
2139 * posted.
2140 **/
2141 static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba * phba,uint32_t hbqno,uint32_t count)2142 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2143 {
2144 uint32_t i, posted = 0;
2145 unsigned long flags;
2146 struct hbq_dmabuf *hbq_buffer;
2147 LIST_HEAD(hbq_buf_list);
2148 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2149 return 0;
2150
2151 if ((phba->hbqs[hbqno].buffer_count + count) >
2152 lpfc_hbq_defs[hbqno]->entry_count)
2153 count = lpfc_hbq_defs[hbqno]->entry_count -
2154 phba->hbqs[hbqno].buffer_count;
2155 if (!count)
2156 return 0;
2157 /* Allocate HBQ entries */
2158 for (i = 0; i < count; i++) {
2159 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2160 if (!hbq_buffer)
2161 break;
2162 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2163 }
2164 /* Check whether HBQ is still in use */
2165 spin_lock_irqsave(&phba->hbalock, flags);
2166 if (!phba->hbq_in_use)
2167 goto err;
2168 while (!list_empty(&hbq_buf_list)) {
2169 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2170 dbuf.list);
2171 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2172 (hbqno << 16));
2173 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2174 phba->hbqs[hbqno].buffer_count++;
2175 posted++;
2176 } else
2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2178 }
2179 spin_unlock_irqrestore(&phba->hbalock, flags);
2180 return posted;
2181 err:
2182 spin_unlock_irqrestore(&phba->hbalock, flags);
2183 while (!list_empty(&hbq_buf_list)) {
2184 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2185 dbuf.list);
2186 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2187 }
2188 return 0;
2189 }
2190
2191 /**
2192 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2193 * @phba: Pointer to HBA context object.
2194 * @qno: HBQ number.
2195 *
2196 * This function posts more buffers to the HBQ. This function
2197 * is called with no lock held. The function returns the number of HBQ entries
2198 * successfully allocated.
2199 **/
2200 int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba * phba,uint32_t qno)2201 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2202 {
2203 if (phba->sli_rev == LPFC_SLI_REV4)
2204 return 0;
2205 else
2206 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2207 lpfc_hbq_defs[qno]->add_count);
2208 }
2209
2210 /**
2211 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2212 * @phba: Pointer to HBA context object.
2213 * @qno: HBQ queue number.
2214 *
2215 * This function is called from SLI initialization code path with
2216 * no lock held to post initial HBQ buffers to firmware. The
2217 * function returns the number of HBQ entries successfully allocated.
2218 **/
2219 static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba * phba,uint32_t qno)2220 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2221 {
2222 if (phba->sli_rev == LPFC_SLI_REV4)
2223 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2224 lpfc_hbq_defs[qno]->entry_count);
2225 else
2226 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2227 lpfc_hbq_defs[qno]->init_count);
2228 }
2229
2230 /*
2231 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2232 *
2233 * This function removes the first hbq buffer on an hbq list and returns a
2234 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2235 **/
2236 static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head * rb_list)2237 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2238 {
2239 struct lpfc_dmabuf *d_buf;
2240
2241 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2242 if (!d_buf)
2243 return NULL;
2244 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2245 }
2246
2247 /**
2248 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2249 * @phba: Pointer to HBA context object.
2250 * @hrq: HBQ number.
2251 *
2252 * This function removes the first RQ buffer on an RQ buffer list and returns a
2253 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2254 **/
2255 static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba * phba,struct lpfc_queue * hrq)2256 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2257 {
2258 struct lpfc_dmabuf *h_buf;
2259 struct lpfc_rqb *rqbp;
2260
2261 rqbp = hrq->rqbp;
2262 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2263 struct lpfc_dmabuf, list);
2264 if (!h_buf)
2265 return NULL;
2266 rqbp->buffer_count--;
2267 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2268 }
2269
2270 /**
2271 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2272 * @phba: Pointer to HBA context object.
2273 * @tag: Tag of the hbq buffer.
2274 *
2275 * This function searches for the hbq buffer associated with the given tag in
2276 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2277 * otherwise it returns NULL.
2278 **/
2279 static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba * phba,uint32_t tag)2280 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2281 {
2282 struct lpfc_dmabuf *d_buf;
2283 struct hbq_dmabuf *hbq_buf;
2284 uint32_t hbqno;
2285
2286 hbqno = tag >> 16;
2287 if (hbqno >= LPFC_MAX_HBQS)
2288 return NULL;
2289
2290 spin_lock_irq(&phba->hbalock);
2291 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2292 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2293 if (hbq_buf->tag == tag) {
2294 spin_unlock_irq(&phba->hbalock);
2295 return hbq_buf;
2296 }
2297 }
2298 spin_unlock_irq(&phba->hbalock);
2299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300 "1803 Bad hbq tag. Data: x%x x%x\n",
2301 tag, phba->hbqs[tag >> 16].buffer_count);
2302 return NULL;
2303 }
2304
2305 /**
2306 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2307 * @phba: Pointer to HBA context object.
2308 * @hbq_buffer: Pointer to HBQ buffer.
2309 *
2310 * This function is called with hbalock. This function gives back
2311 * the hbq buffer to firmware. If the HBQ does not have space to
2312 * post the buffer, it will free the buffer.
2313 **/
2314 void
lpfc_sli_free_hbq(struct lpfc_hba * phba,struct hbq_dmabuf * hbq_buffer)2315 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2316 {
2317 uint32_t hbqno;
2318
2319 if (hbq_buffer) {
2320 hbqno = hbq_buffer->tag >> 16;
2321 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2322 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2323 }
2324 }
2325
2326 /**
2327 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2328 * @mbxCommand: mailbox command code.
2329 *
2330 * This function is called by the mailbox event handler function to verify
2331 * that the completed mailbox command is a legitimate mailbox command. If the
2332 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2333 * and the mailbox event handler will take the HBA offline.
2334 **/
2335 static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)2336 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2337 {
2338 uint8_t ret;
2339
2340 switch (mbxCommand) {
2341 case MBX_LOAD_SM:
2342 case MBX_READ_NV:
2343 case MBX_WRITE_NV:
2344 case MBX_WRITE_VPARMS:
2345 case MBX_RUN_BIU_DIAG:
2346 case MBX_INIT_LINK:
2347 case MBX_DOWN_LINK:
2348 case MBX_CONFIG_LINK:
2349 case MBX_CONFIG_RING:
2350 case MBX_RESET_RING:
2351 case MBX_READ_CONFIG:
2352 case MBX_READ_RCONFIG:
2353 case MBX_READ_SPARM:
2354 case MBX_READ_STATUS:
2355 case MBX_READ_RPI:
2356 case MBX_READ_XRI:
2357 case MBX_READ_REV:
2358 case MBX_READ_LNK_STAT:
2359 case MBX_REG_LOGIN:
2360 case MBX_UNREG_LOGIN:
2361 case MBX_CLEAR_LA:
2362 case MBX_DUMP_MEMORY:
2363 case MBX_DUMP_CONTEXT:
2364 case MBX_RUN_DIAGS:
2365 case MBX_RESTART:
2366 case MBX_UPDATE_CFG:
2367 case MBX_DOWN_LOAD:
2368 case MBX_DEL_LD_ENTRY:
2369 case MBX_RUN_PROGRAM:
2370 case MBX_SET_MASK:
2371 case MBX_SET_VARIABLE:
2372 case MBX_UNREG_D_ID:
2373 case MBX_KILL_BOARD:
2374 case MBX_CONFIG_FARP:
2375 case MBX_BEACON:
2376 case MBX_LOAD_AREA:
2377 case MBX_RUN_BIU_DIAG64:
2378 case MBX_CONFIG_PORT:
2379 case MBX_READ_SPARM64:
2380 case MBX_READ_RPI64:
2381 case MBX_REG_LOGIN64:
2382 case MBX_READ_TOPOLOGY:
2383 case MBX_WRITE_WWN:
2384 case MBX_SET_DEBUG:
2385 case MBX_LOAD_EXP_ROM:
2386 case MBX_ASYNCEVT_ENABLE:
2387 case MBX_REG_VPI:
2388 case MBX_UNREG_VPI:
2389 case MBX_HEARTBEAT:
2390 case MBX_PORT_CAPABILITIES:
2391 case MBX_PORT_IOV_CONTROL:
2392 case MBX_SLI4_CONFIG:
2393 case MBX_SLI4_REQ_FTRS:
2394 case MBX_REG_FCFI:
2395 case MBX_UNREG_FCFI:
2396 case MBX_REG_VFI:
2397 case MBX_UNREG_VFI:
2398 case MBX_INIT_VPI:
2399 case MBX_INIT_VFI:
2400 case MBX_RESUME_RPI:
2401 case MBX_READ_EVENT_LOG_STATUS:
2402 case MBX_READ_EVENT_LOG:
2403 case MBX_SECURITY_MGMT:
2404 case MBX_AUTH_PORT:
2405 case MBX_ACCESS_VDATA:
2406 ret = mbxCommand;
2407 break;
2408 default:
2409 ret = MBX_SHUTDOWN;
2410 break;
2411 }
2412 return ret;
2413 }
2414
2415 /**
2416 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2417 * @phba: Pointer to HBA context object.
2418 * @pmboxq: Pointer to mailbox command.
2419 *
2420 * This is completion handler function for mailbox commands issued from
2421 * lpfc_sli_issue_mbox_wait function. This function is called by the
2422 * mailbox event handler function with no lock held. This function
2423 * will wake up thread waiting on the wait queue pointed by context1
2424 * of the mailbox.
2425 **/
2426 void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)2427 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2428 {
2429 unsigned long drvr_flag;
2430 struct completion *pmbox_done;
2431
2432 /*
2433 * If pmbox_done is empty, the driver thread gave up waiting and
2434 * continued running.
2435 */
2436 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2437 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2438 pmbox_done = (struct completion *)pmboxq->context3;
2439 if (pmbox_done)
2440 complete(pmbox_done);
2441 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2442 return;
2443 }
2444
2445 static void
__lpfc_sli_rpi_release(struct lpfc_vport * vport,struct lpfc_nodelist * ndlp)2446 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2447 {
2448 unsigned long iflags;
2449
2450 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2451 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2452 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2453 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2454 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2455 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2456 }
2457 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2458 }
2459
2460 /**
2461 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2462 * @phba: Pointer to HBA context object.
2463 * @pmb: Pointer to mailbox object.
2464 *
2465 * This function is the default mailbox completion handler. It
2466 * frees the memory resources associated with the completed mailbox
2467 * command. If the completed command is a REG_LOGIN mailbox command,
2468 * this function will issue a UREG_LOGIN to re-claim the RPI.
2469 **/
2470 void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmb)2471 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2472 {
2473 struct lpfc_vport *vport = pmb->vport;
2474 struct lpfc_dmabuf *mp;
2475 struct lpfc_nodelist *ndlp;
2476 struct Scsi_Host *shost;
2477 uint16_t rpi, vpi;
2478 int rc;
2479
2480 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2481
2482 if (mp) {
2483 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2484 kfree(mp);
2485 }
2486
2487 /*
2488 * If a REG_LOGIN succeeded after node is destroyed or node
2489 * is in re-discovery driver need to cleanup the RPI.
2490 */
2491 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2492 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2493 !pmb->u.mb.mbxStatus) {
2494 rpi = pmb->u.mb.un.varWords[0];
2495 vpi = pmb->u.mb.un.varRegLogin.vpi;
2496 if (phba->sli_rev == LPFC_SLI_REV4)
2497 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2498 lpfc_unreg_login(phba, vpi, rpi, pmb);
2499 pmb->vport = vport;
2500 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2501 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2502 if (rc != MBX_NOT_FINISHED)
2503 return;
2504 }
2505
2506 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2507 !(phba->pport->load_flag & FC_UNLOADING) &&
2508 !pmb->u.mb.mbxStatus) {
2509 shost = lpfc_shost_from_vport(vport);
2510 spin_lock_irq(shost->host_lock);
2511 vport->vpi_state |= LPFC_VPI_REGISTERED;
2512 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2513 spin_unlock_irq(shost->host_lock);
2514 }
2515
2516 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2517 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2518 lpfc_nlp_put(ndlp);
2519 pmb->ctx_buf = NULL;
2520 pmb->ctx_ndlp = NULL;
2521 }
2522
2523 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2524 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2525
2526 /* Check to see if there are any deferred events to process */
2527 if (ndlp) {
2528 lpfc_printf_vlog(
2529 vport,
2530 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2531 "1438 UNREG cmpl deferred mbox x%x "
2532 "on NPort x%x Data: x%x x%x %px\n",
2533 ndlp->nlp_rpi, ndlp->nlp_DID,
2534 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2535
2536 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2537 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2538 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2539 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2540 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2541 } else {
2542 __lpfc_sli_rpi_release(vport, ndlp);
2543 }
2544 if (vport->load_flag & FC_UNLOADING)
2545 lpfc_nlp_put(ndlp);
2546 pmb->ctx_ndlp = NULL;
2547 }
2548 }
2549
2550 /* Check security permission status on INIT_LINK mailbox command */
2551 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2552 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2554 "2860 SLI authentication is required "
2555 "for INIT_LINK but has not done yet\n");
2556
2557 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2558 lpfc_sli4_mbox_cmd_free(phba, pmb);
2559 else
2560 mempool_free(pmb, phba->mbox_mem_pool);
2561 }
2562 /**
2563 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2564 * @phba: Pointer to HBA context object.
2565 * @pmb: Pointer to mailbox object.
2566 *
2567 * This function is the unreg rpi mailbox completion handler. It
2568 * frees the memory resources associated with the completed mailbox
2569 * command. An additional refrenece is put on the ndlp to prevent
2570 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2571 * the unreg mailbox command completes, this routine puts the
2572 * reference back.
2573 *
2574 **/
2575 void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmb)2576 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2577 {
2578 struct lpfc_vport *vport = pmb->vport;
2579 struct lpfc_nodelist *ndlp;
2580
2581 ndlp = pmb->ctx_ndlp;
2582 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2583 if (phba->sli_rev == LPFC_SLI_REV4 &&
2584 (bf_get(lpfc_sli_intf_if_type,
2585 &phba->sli4_hba.sli_intf) >=
2586 LPFC_SLI_INTF_IF_TYPE_2)) {
2587 if (ndlp) {
2588 lpfc_printf_vlog(
2589 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2590 "0010 UNREG_LOGIN vpi:%x "
2591 "rpi:%x DID:%x defer x%x flg x%x "
2592 "map:%x %px\n",
2593 vport->vpi, ndlp->nlp_rpi,
2594 ndlp->nlp_DID, ndlp->nlp_defer_did,
2595 ndlp->nlp_flag,
2596 ndlp->nlp_usg_map, ndlp);
2597 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2598 lpfc_nlp_put(ndlp);
2599
2600 /* Check to see if there are any deferred
2601 * events to process
2602 */
2603 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2604 (ndlp->nlp_defer_did !=
2605 NLP_EVT_NOTHING_PENDING)) {
2606 lpfc_printf_vlog(
2607 vport, KERN_INFO, LOG_DISCOVERY,
2608 "4111 UNREG cmpl deferred "
2609 "clr x%x on "
2610 "NPort x%x Data: x%x x%px\n",
2611 ndlp->nlp_rpi, ndlp->nlp_DID,
2612 ndlp->nlp_defer_did, ndlp);
2613 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2614 ndlp->nlp_defer_did =
2615 NLP_EVT_NOTHING_PENDING;
2616 lpfc_issue_els_plogi(
2617 vport, ndlp->nlp_DID, 0);
2618 } else {
2619 __lpfc_sli_rpi_release(vport, ndlp);
2620 }
2621 }
2622 }
2623 }
2624
2625 mempool_free(pmb, phba->mbox_mem_pool);
2626 }
2627
2628 /**
2629 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2630 * @phba: Pointer to HBA context object.
2631 *
2632 * This function is called with no lock held. This function processes all
2633 * the completed mailbox commands and gives it to upper layers. The interrupt
2634 * service routine processes mailbox completion interrupt and adds completed
2635 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2636 * Worker thread call lpfc_sli_handle_mb_event, which will return the
2637 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2638 * function returns the mailbox commands to the upper layer by calling the
2639 * completion handler function of each mailbox.
2640 **/
2641 int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)2642 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2643 {
2644 MAILBOX_t *pmbox;
2645 LPFC_MBOXQ_t *pmb;
2646 int rc;
2647 LIST_HEAD(cmplq);
2648
2649 phba->sli.slistat.mbox_event++;
2650
2651 /* Get all completed mailboxe buffers into the cmplq */
2652 spin_lock_irq(&phba->hbalock);
2653 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2654 spin_unlock_irq(&phba->hbalock);
2655
2656 /* Get a Mailbox buffer to setup mailbox commands for callback */
2657 do {
2658 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2659 if (pmb == NULL)
2660 break;
2661
2662 pmbox = &pmb->u.mb;
2663
2664 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2665 if (pmb->vport) {
2666 lpfc_debugfs_disc_trc(pmb->vport,
2667 LPFC_DISC_TRC_MBOX_VPORT,
2668 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2669 (uint32_t)pmbox->mbxCommand,
2670 pmbox->un.varWords[0],
2671 pmbox->un.varWords[1]);
2672 }
2673 else {
2674 lpfc_debugfs_disc_trc(phba->pport,
2675 LPFC_DISC_TRC_MBOX,
2676 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2677 (uint32_t)pmbox->mbxCommand,
2678 pmbox->un.varWords[0],
2679 pmbox->un.varWords[1]);
2680 }
2681 }
2682
2683 /*
2684 * It is a fatal error if unknown mbox command completion.
2685 */
2686 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2687 MBX_SHUTDOWN) {
2688 /* Unknown mailbox command compl */
2689 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2690 "(%d):0323 Unknown Mailbox command "
2691 "x%x (x%x/x%x) Cmpl\n",
2692 pmb->vport ? pmb->vport->vpi :
2693 LPFC_VPORT_UNKNOWN,
2694 pmbox->mbxCommand,
2695 lpfc_sli_config_mbox_subsys_get(phba,
2696 pmb),
2697 lpfc_sli_config_mbox_opcode_get(phba,
2698 pmb));
2699 phba->link_state = LPFC_HBA_ERROR;
2700 phba->work_hs = HS_FFER3;
2701 lpfc_handle_eratt(phba);
2702 continue;
2703 }
2704
2705 if (pmbox->mbxStatus) {
2706 phba->sli.slistat.mbox_stat_err++;
2707 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2708 /* Mbox cmd cmpl error - RETRYing */
2709 lpfc_printf_log(phba, KERN_INFO,
2710 LOG_MBOX | LOG_SLI,
2711 "(%d):0305 Mbox cmd cmpl "
2712 "error - RETRYing Data: x%x "
2713 "(x%x/x%x) x%x x%x x%x\n",
2714 pmb->vport ? pmb->vport->vpi :
2715 LPFC_VPORT_UNKNOWN,
2716 pmbox->mbxCommand,
2717 lpfc_sli_config_mbox_subsys_get(phba,
2718 pmb),
2719 lpfc_sli_config_mbox_opcode_get(phba,
2720 pmb),
2721 pmbox->mbxStatus,
2722 pmbox->un.varWords[0],
2723 pmb->vport ? pmb->vport->port_state :
2724 LPFC_VPORT_UNKNOWN);
2725 pmbox->mbxStatus = 0;
2726 pmbox->mbxOwner = OWN_HOST;
2727 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2728 if (rc != MBX_NOT_FINISHED)
2729 continue;
2730 }
2731 }
2732
2733 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2734 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2735 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2736 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2737 "x%x x%x x%x\n",
2738 pmb->vport ? pmb->vport->vpi : 0,
2739 pmbox->mbxCommand,
2740 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2741 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2742 pmb->mbox_cmpl,
2743 *((uint32_t *) pmbox),
2744 pmbox->un.varWords[0],
2745 pmbox->un.varWords[1],
2746 pmbox->un.varWords[2],
2747 pmbox->un.varWords[3],
2748 pmbox->un.varWords[4],
2749 pmbox->un.varWords[5],
2750 pmbox->un.varWords[6],
2751 pmbox->un.varWords[7],
2752 pmbox->un.varWords[8],
2753 pmbox->un.varWords[9],
2754 pmbox->un.varWords[10]);
2755
2756 if (pmb->mbox_cmpl)
2757 pmb->mbox_cmpl(phba,pmb);
2758 } while (1);
2759 return 0;
2760 }
2761
2762 /**
2763 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2764 * @phba: Pointer to HBA context object.
2765 * @pring: Pointer to driver SLI ring object.
2766 * @tag: buffer tag.
2767 *
2768 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2769 * is set in the tag the buffer is posted for a particular exchange,
2770 * the function will return the buffer without replacing the buffer.
2771 * If the buffer is for unsolicited ELS or CT traffic, this function
2772 * returns the buffer and also posts another buffer to the firmware.
2773 **/
2774 static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t tag)2775 lpfc_sli_get_buff(struct lpfc_hba *phba,
2776 struct lpfc_sli_ring *pring,
2777 uint32_t tag)
2778 {
2779 struct hbq_dmabuf *hbq_entry;
2780
2781 if (tag & QUE_BUFTAG_BIT)
2782 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2783 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2784 if (!hbq_entry)
2785 return NULL;
2786 return &hbq_entry->dbuf;
2787 }
2788
2789 /**
2790 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2791 * containing a NVME LS request.
2792 * @phba: pointer to lpfc hba data structure.
2793 * @piocb: pointer to the iocbq struct representing the sequence starting
2794 * frame.
2795 *
2796 * This routine initially validates the NVME LS, validates there is a login
2797 * with the port that sent the LS, and then calls the appropriate nvme host
2798 * or target LS request handler.
2799 **/
2800 static void
lpfc_nvme_unsol_ls_handler(struct lpfc_hba * phba,struct lpfc_iocbq * piocb)2801 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2802 {
2803 struct lpfc_nodelist *ndlp;
2804 struct lpfc_dmabuf *d_buf;
2805 struct hbq_dmabuf *nvmebuf;
2806 struct fc_frame_header *fc_hdr;
2807 struct lpfc_async_xchg_ctx *axchg = NULL;
2808 char *failwhy = NULL;
2809 uint32_t oxid, sid, did, fctl, size;
2810 int ret = 1;
2811
2812 d_buf = piocb->context2;
2813
2814 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2815 fc_hdr = nvmebuf->hbuf.virt;
2816 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2817 sid = sli4_sid_from_fc_hdr(fc_hdr);
2818 did = sli4_did_from_fc_hdr(fc_hdr);
2819 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2820 fc_hdr->fh_f_ctl[1] << 8 |
2821 fc_hdr->fh_f_ctl[2]);
2822 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2823
2824 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2825 oxid, size, sid);
2826
2827 if (phba->pport->load_flag & FC_UNLOADING) {
2828 failwhy = "Driver Unloading";
2829 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2830 failwhy = "NVME FC4 Disabled";
2831 } else if (!phba->nvmet_support && !phba->pport->localport) {
2832 failwhy = "No Localport";
2833 } else if (phba->nvmet_support && !phba->targetport) {
2834 failwhy = "No Targetport";
2835 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2836 failwhy = "Bad NVME LS R_CTL";
2837 } else if (unlikely((fctl & 0x00FF0000) !=
2838 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2839 failwhy = "Bad NVME LS F_CTL";
2840 } else {
2841 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2842 if (!axchg)
2843 failwhy = "No CTX memory";
2844 }
2845
2846 if (unlikely(failwhy)) {
2847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2848 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2849 sid, oxid, failwhy);
2850 goto out_fail;
2851 }
2852
2853 /* validate the source of the LS is logged in */
2854 ndlp = lpfc_findnode_did(phba->pport, sid);
2855 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2856 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2857 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2858 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2859 "6216 NVME Unsol rcv: No ndlp: "
2860 "NPort_ID x%x oxid x%x\n",
2861 sid, oxid);
2862 goto out_fail;
2863 }
2864
2865 axchg->phba = phba;
2866 axchg->ndlp = ndlp;
2867 axchg->size = size;
2868 axchg->oxid = oxid;
2869 axchg->sid = sid;
2870 axchg->wqeq = NULL;
2871 axchg->state = LPFC_NVME_STE_LS_RCV;
2872 axchg->entry_cnt = 1;
2873 axchg->rqb_buffer = (void *)nvmebuf;
2874 axchg->hdwq = &phba->sli4_hba.hdwq[0];
2875 axchg->payload = nvmebuf->dbuf.virt;
2876 INIT_LIST_HEAD(&axchg->list);
2877
2878 if (phba->nvmet_support)
2879 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2880 else
2881 ret = lpfc_nvme_handle_lsreq(phba, axchg);
2882
2883 /* if zero, LS was successfully handled. If non-zero, LS not handled */
2884 if (!ret)
2885 return;
2886
2887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2888 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2889 "NVMe%s handler failed %d\n",
2890 did, sid, oxid,
2891 (phba->nvmet_support) ? "T" : "I", ret);
2892
2893 out_fail:
2894
2895 /* recycle receive buffer */
2896 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2897
2898 /* If start of new exchange, abort it */
2899 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2900 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2901
2902 if (ret)
2903 kfree(axchg);
2904 }
2905
2906 /**
2907 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2908 * @phba: Pointer to HBA context object.
2909 * @pring: Pointer to driver SLI ring object.
2910 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2911 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2912 * @fch_type: the type for the first frame of the sequence.
2913 *
2914 * This function is called with no lock held. This function uses the r_ctl and
2915 * type of the received sequence to find the correct callback function to call
2916 * to process the sequence.
2917 **/
2918 static int
lpfc_complete_unsol_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * saveq,uint32_t fch_r_ctl,uint32_t fch_type)2919 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2920 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2921 uint32_t fch_type)
2922 {
2923 int i;
2924
2925 switch (fch_type) {
2926 case FC_TYPE_NVME:
2927 lpfc_nvme_unsol_ls_handler(phba, saveq);
2928 return 1;
2929 default:
2930 break;
2931 }
2932
2933 /* unSolicited Responses */
2934 if (pring->prt[0].profile) {
2935 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2936 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2937 saveq);
2938 return 1;
2939 }
2940 /* We must search, based on rctl / type
2941 for the right routine */
2942 for (i = 0; i < pring->num_mask; i++) {
2943 if ((pring->prt[i].rctl == fch_r_ctl) &&
2944 (pring->prt[i].type == fch_type)) {
2945 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2946 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2947 (phba, pring, saveq);
2948 return 1;
2949 }
2950 }
2951 return 0;
2952 }
2953
2954 /**
2955 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2956 * @phba: Pointer to HBA context object.
2957 * @pring: Pointer to driver SLI ring object.
2958 * @saveq: Pointer to the unsolicited iocb.
2959 *
2960 * This function is called with no lock held by the ring event handler
2961 * when there is an unsolicited iocb posted to the response ring by the
2962 * firmware. This function gets the buffer associated with the iocbs
2963 * and calls the event handler for the ring. This function handles both
2964 * qring buffers and hbq buffers.
2965 * When the function returns 1 the caller can free the iocb object otherwise
2966 * upper layer functions will free the iocb objects.
2967 **/
2968 static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * saveq)2969 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2970 struct lpfc_iocbq *saveq)
2971 {
2972 IOCB_t * irsp;
2973 WORD5 * w5p;
2974 uint32_t Rctl, Type;
2975 struct lpfc_iocbq *iocbq;
2976 struct lpfc_dmabuf *dmzbuf;
2977
2978 irsp = &(saveq->iocb);
2979
2980 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2981 if (pring->lpfc_sli_rcv_async_status)
2982 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2983 else
2984 lpfc_printf_log(phba,
2985 KERN_WARNING,
2986 LOG_SLI,
2987 "0316 Ring %d handler: unexpected "
2988 "ASYNC_STATUS iocb received evt_code "
2989 "0x%x\n",
2990 pring->ringno,
2991 irsp->un.asyncstat.evt_code);
2992 return 1;
2993 }
2994
2995 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2996 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2997 if (irsp->ulpBdeCount > 0) {
2998 dmzbuf = lpfc_sli_get_buff(phba, pring,
2999 irsp->un.ulpWord[3]);
3000 lpfc_in_buf_free(phba, dmzbuf);
3001 }
3002
3003 if (irsp->ulpBdeCount > 1) {
3004 dmzbuf = lpfc_sli_get_buff(phba, pring,
3005 irsp->unsli3.sli3Words[3]);
3006 lpfc_in_buf_free(phba, dmzbuf);
3007 }
3008
3009 if (irsp->ulpBdeCount > 2) {
3010 dmzbuf = lpfc_sli_get_buff(phba, pring,
3011 irsp->unsli3.sli3Words[7]);
3012 lpfc_in_buf_free(phba, dmzbuf);
3013 }
3014
3015 return 1;
3016 }
3017
3018 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3019 if (irsp->ulpBdeCount != 0) {
3020 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3021 irsp->un.ulpWord[3]);
3022 if (!saveq->context2)
3023 lpfc_printf_log(phba,
3024 KERN_ERR,
3025 LOG_SLI,
3026 "0341 Ring %d Cannot find buffer for "
3027 "an unsolicited iocb. tag 0x%x\n",
3028 pring->ringno,
3029 irsp->un.ulpWord[3]);
3030 }
3031 if (irsp->ulpBdeCount == 2) {
3032 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3033 irsp->unsli3.sli3Words[7]);
3034 if (!saveq->context3)
3035 lpfc_printf_log(phba,
3036 KERN_ERR,
3037 LOG_SLI,
3038 "0342 Ring %d Cannot find buffer for an"
3039 " unsolicited iocb. tag 0x%x\n",
3040 pring->ringno,
3041 irsp->unsli3.sli3Words[7]);
3042 }
3043 list_for_each_entry(iocbq, &saveq->list, list) {
3044 irsp = &(iocbq->iocb);
3045 if (irsp->ulpBdeCount != 0) {
3046 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3047 irsp->un.ulpWord[3]);
3048 if (!iocbq->context2)
3049 lpfc_printf_log(phba,
3050 KERN_ERR,
3051 LOG_SLI,
3052 "0343 Ring %d Cannot find "
3053 "buffer for an unsolicited iocb"
3054 ". tag 0x%x\n", pring->ringno,
3055 irsp->un.ulpWord[3]);
3056 }
3057 if (irsp->ulpBdeCount == 2) {
3058 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3059 irsp->unsli3.sli3Words[7]);
3060 if (!iocbq->context3)
3061 lpfc_printf_log(phba,
3062 KERN_ERR,
3063 LOG_SLI,
3064 "0344 Ring %d Cannot find "
3065 "buffer for an unsolicited "
3066 "iocb. tag 0x%x\n",
3067 pring->ringno,
3068 irsp->unsli3.sli3Words[7]);
3069 }
3070 }
3071 }
3072 if (irsp->ulpBdeCount != 0 &&
3073 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3074 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3075 int found = 0;
3076
3077 /* search continue save q for same XRI */
3078 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3079 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3080 saveq->iocb.unsli3.rcvsli3.ox_id) {
3081 list_add_tail(&saveq->list, &iocbq->list);
3082 found = 1;
3083 break;
3084 }
3085 }
3086 if (!found)
3087 list_add_tail(&saveq->clist,
3088 &pring->iocb_continue_saveq);
3089 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3090 list_del_init(&iocbq->clist);
3091 saveq = iocbq;
3092 irsp = &(saveq->iocb);
3093 } else
3094 return 0;
3095 }
3096 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3097 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3098 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3099 Rctl = FC_RCTL_ELS_REQ;
3100 Type = FC_TYPE_ELS;
3101 } else {
3102 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3103 Rctl = w5p->hcsw.Rctl;
3104 Type = w5p->hcsw.Type;
3105
3106 /* Firmware Workaround */
3107 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3108 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3109 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3110 Rctl = FC_RCTL_ELS_REQ;
3111 Type = FC_TYPE_ELS;
3112 w5p->hcsw.Rctl = Rctl;
3113 w5p->hcsw.Type = Type;
3114 }
3115 }
3116
3117 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3118 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3119 "0313 Ring %d handler: unexpected Rctl x%x "
3120 "Type x%x received\n",
3121 pring->ringno, Rctl, Type);
3122
3123 return 1;
3124 }
3125
3126 /**
3127 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3128 * @phba: Pointer to HBA context object.
3129 * @pring: Pointer to driver SLI ring object.
3130 * @prspiocb: Pointer to response iocb object.
3131 *
3132 * This function looks up the iocb_lookup table to get the command iocb
3133 * corresponding to the given response iocb using the iotag of the
3134 * response iocb. The driver calls this function with the hbalock held
3135 * for SLI3 ports or the ring lock held for SLI4 ports.
3136 * This function returns the command iocb object if it finds the command
3137 * iocb else returns NULL.
3138 **/
3139 static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * prspiocb)3140 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3141 struct lpfc_sli_ring *pring,
3142 struct lpfc_iocbq *prspiocb)
3143 {
3144 struct lpfc_iocbq *cmd_iocb = NULL;
3145 uint16_t iotag;
3146 spinlock_t *temp_lock = NULL;
3147 unsigned long iflag = 0;
3148
3149 if (phba->sli_rev == LPFC_SLI_REV4)
3150 temp_lock = &pring->ring_lock;
3151 else
3152 temp_lock = &phba->hbalock;
3153
3154 spin_lock_irqsave(temp_lock, iflag);
3155 iotag = prspiocb->iocb.ulpIoTag;
3156
3157 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3158 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3159 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3160 /* remove from txcmpl queue list */
3161 list_del_init(&cmd_iocb->list);
3162 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3163 pring->txcmplq_cnt--;
3164 spin_unlock_irqrestore(temp_lock, iflag);
3165 return cmd_iocb;
3166 }
3167 }
3168
3169 spin_unlock_irqrestore(temp_lock, iflag);
3170 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3171 "0317 iotag x%x is out of "
3172 "range: max iotag x%x wd0 x%x\n",
3173 iotag, phba->sli.last_iotag,
3174 *(((uint32_t *) &prspiocb->iocb) + 7));
3175 return NULL;
3176 }
3177
3178 /**
3179 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3180 * @phba: Pointer to HBA context object.
3181 * @pring: Pointer to driver SLI ring object.
3182 * @iotag: IOCB tag.
3183 *
3184 * This function looks up the iocb_lookup table to get the command iocb
3185 * corresponding to the given iotag. The driver calls this function with
3186 * the ring lock held because this function is an SLI4 port only helper.
3187 * This function returns the command iocb object if it finds the command
3188 * iocb else returns NULL.
3189 **/
3190 static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint16_t iotag)3191 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3192 struct lpfc_sli_ring *pring, uint16_t iotag)
3193 {
3194 struct lpfc_iocbq *cmd_iocb = NULL;
3195 spinlock_t *temp_lock = NULL;
3196 unsigned long iflag = 0;
3197
3198 if (phba->sli_rev == LPFC_SLI_REV4)
3199 temp_lock = &pring->ring_lock;
3200 else
3201 temp_lock = &phba->hbalock;
3202
3203 spin_lock_irqsave(temp_lock, iflag);
3204 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3205 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3206 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3207 /* remove from txcmpl queue list */
3208 list_del_init(&cmd_iocb->list);
3209 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3210 pring->txcmplq_cnt--;
3211 spin_unlock_irqrestore(temp_lock, iflag);
3212 return cmd_iocb;
3213 }
3214 }
3215
3216 spin_unlock_irqrestore(temp_lock, iflag);
3217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3218 "0372 iotag x%x lookup error: max iotag (x%x) "
3219 "iocb_flag x%x\n",
3220 iotag, phba->sli.last_iotag,
3221 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3222 return NULL;
3223 }
3224
3225 /**
3226 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3227 * @phba: Pointer to HBA context object.
3228 * @pring: Pointer to driver SLI ring object.
3229 * @saveq: Pointer to the response iocb to be processed.
3230 *
3231 * This function is called by the ring event handler for non-fcp
3232 * rings when there is a new response iocb in the response ring.
3233 * The caller is not required to hold any locks. This function
3234 * gets the command iocb associated with the response iocb and
3235 * calls the completion handler for the command iocb. If there
3236 * is no completion handler, the function will free the resources
3237 * associated with command iocb. If the response iocb is for
3238 * an already aborted command iocb, the status of the completion
3239 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3240 * This function always returns 1.
3241 **/
3242 static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * saveq)3243 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3244 struct lpfc_iocbq *saveq)
3245 {
3246 struct lpfc_iocbq *cmdiocbp;
3247 int rc = 1;
3248 unsigned long iflag;
3249
3250 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3251 if (cmdiocbp) {
3252 if (cmdiocbp->iocb_cmpl) {
3253 /*
3254 * If an ELS command failed send an event to mgmt
3255 * application.
3256 */
3257 if (saveq->iocb.ulpStatus &&
3258 (pring->ringno == LPFC_ELS_RING) &&
3259 (cmdiocbp->iocb.ulpCommand ==
3260 CMD_ELS_REQUEST64_CR))
3261 lpfc_send_els_failure_event(phba,
3262 cmdiocbp, saveq);
3263
3264 /*
3265 * Post all ELS completions to the worker thread.
3266 * All other are passed to the completion callback.
3267 */
3268 if (pring->ringno == LPFC_ELS_RING) {
3269 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3270 (cmdiocbp->iocb_flag &
3271 LPFC_DRIVER_ABORTED)) {
3272 spin_lock_irqsave(&phba->hbalock,
3273 iflag);
3274 cmdiocbp->iocb_flag &=
3275 ~LPFC_DRIVER_ABORTED;
3276 spin_unlock_irqrestore(&phba->hbalock,
3277 iflag);
3278 saveq->iocb.ulpStatus =
3279 IOSTAT_LOCAL_REJECT;
3280 saveq->iocb.un.ulpWord[4] =
3281 IOERR_SLI_ABORTED;
3282
3283 /* Firmware could still be in progress
3284 * of DMAing payload, so don't free data
3285 * buffer till after a hbeat.
3286 */
3287 spin_lock_irqsave(&phba->hbalock,
3288 iflag);
3289 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3290 spin_unlock_irqrestore(&phba->hbalock,
3291 iflag);
3292 }
3293 if (phba->sli_rev == LPFC_SLI_REV4) {
3294 if (saveq->iocb_flag &
3295 LPFC_EXCHANGE_BUSY) {
3296 /* Set cmdiocb flag for the
3297 * exchange busy so sgl (xri)
3298 * will not be released until
3299 * the abort xri is received
3300 * from hba.
3301 */
3302 spin_lock_irqsave(
3303 &phba->hbalock, iflag);
3304 cmdiocbp->iocb_flag |=
3305 LPFC_EXCHANGE_BUSY;
3306 spin_unlock_irqrestore(
3307 &phba->hbalock, iflag);
3308 }
3309 if (cmdiocbp->iocb_flag &
3310 LPFC_DRIVER_ABORTED) {
3311 /*
3312 * Clear LPFC_DRIVER_ABORTED
3313 * bit in case it was driver
3314 * initiated abort.
3315 */
3316 spin_lock_irqsave(
3317 &phba->hbalock, iflag);
3318 cmdiocbp->iocb_flag &=
3319 ~LPFC_DRIVER_ABORTED;
3320 spin_unlock_irqrestore(
3321 &phba->hbalock, iflag);
3322 cmdiocbp->iocb.ulpStatus =
3323 IOSTAT_LOCAL_REJECT;
3324 cmdiocbp->iocb.un.ulpWord[4] =
3325 IOERR_ABORT_REQUESTED;
3326 /*
3327 * For SLI4, irsiocb contains
3328 * NO_XRI in sli_xritag, it
3329 * shall not affect releasing
3330 * sgl (xri) process.
3331 */
3332 saveq->iocb.ulpStatus =
3333 IOSTAT_LOCAL_REJECT;
3334 saveq->iocb.un.ulpWord[4] =
3335 IOERR_SLI_ABORTED;
3336 spin_lock_irqsave(
3337 &phba->hbalock, iflag);
3338 saveq->iocb_flag |=
3339 LPFC_DELAY_MEM_FREE;
3340 spin_unlock_irqrestore(
3341 &phba->hbalock, iflag);
3342 }
3343 }
3344 }
3345 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3346 } else
3347 lpfc_sli_release_iocbq(phba, cmdiocbp);
3348 } else {
3349 /*
3350 * Unknown initiating command based on the response iotag.
3351 * This could be the case on the ELS ring because of
3352 * lpfc_els_abort().
3353 */
3354 if (pring->ringno != LPFC_ELS_RING) {
3355 /*
3356 * Ring <ringno> handler: unexpected completion IoTag
3357 * <IoTag>
3358 */
3359 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3360 "0322 Ring %d handler: "
3361 "unexpected completion IoTag x%x "
3362 "Data: x%x x%x x%x x%x\n",
3363 pring->ringno,
3364 saveq->iocb.ulpIoTag,
3365 saveq->iocb.ulpStatus,
3366 saveq->iocb.un.ulpWord[4],
3367 saveq->iocb.ulpCommand,
3368 saveq->iocb.ulpContext);
3369 }
3370 }
3371
3372 return rc;
3373 }
3374
3375 /**
3376 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3377 * @phba: Pointer to HBA context object.
3378 * @pring: Pointer to driver SLI ring object.
3379 *
3380 * This function is called from the iocb ring event handlers when
3381 * put pointer is ahead of the get pointer for a ring. This function signal
3382 * an error attention condition to the worker thread and the worker
3383 * thread will transition the HBA to offline state.
3384 **/
3385 static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)3386 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3387 {
3388 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3389 /*
3390 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3391 * rsp ring <portRspMax>
3392 */
3393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3394 "0312 Ring %d handler: portRspPut %d "
3395 "is bigger than rsp ring %d\n",
3396 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3397 pring->sli.sli3.numRiocb);
3398
3399 phba->link_state = LPFC_HBA_ERROR;
3400
3401 /*
3402 * All error attention handlers are posted to
3403 * worker thread
3404 */
3405 phba->work_ha |= HA_ERATT;
3406 phba->work_hs = HS_FFER3;
3407
3408 lpfc_worker_wake_up(phba);
3409
3410 return;
3411 }
3412
3413 /**
3414 * lpfc_poll_eratt - Error attention polling timer timeout handler
3415 * @t: Context to fetch pointer to address of HBA context object from.
3416 *
3417 * This function is invoked by the Error Attention polling timer when the
3418 * timer times out. It will check the SLI Error Attention register for
3419 * possible attention events. If so, it will post an Error Attention event
3420 * and wake up worker thread to process it. Otherwise, it will set up the
3421 * Error Attention polling timer for the next poll.
3422 **/
lpfc_poll_eratt(struct timer_list * t)3423 void lpfc_poll_eratt(struct timer_list *t)
3424 {
3425 struct lpfc_hba *phba;
3426 uint32_t eratt = 0;
3427 uint64_t sli_intr, cnt;
3428
3429 phba = from_timer(phba, t, eratt_poll);
3430
3431 /* Here we will also keep track of interrupts per sec of the hba */
3432 sli_intr = phba->sli.slistat.sli_intr;
3433
3434 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3435 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3436 sli_intr);
3437 else
3438 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3439
3440 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3441 do_div(cnt, phba->eratt_poll_interval);
3442 phba->sli.slistat.sli_ips = cnt;
3443
3444 phba->sli.slistat.sli_prev_intr = sli_intr;
3445
3446 /* Check chip HA register for error event */
3447 eratt = lpfc_sli_check_eratt(phba);
3448
3449 if (eratt)
3450 /* Tell the worker thread there is work to do */
3451 lpfc_worker_wake_up(phba);
3452 else
3453 /* Restart the timer for next eratt poll */
3454 mod_timer(&phba->eratt_poll,
3455 jiffies +
3456 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3457 return;
3458 }
3459
3460
3461 /**
3462 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3463 * @phba: Pointer to HBA context object.
3464 * @pring: Pointer to driver SLI ring object.
3465 * @mask: Host attention register mask for this ring.
3466 *
3467 * This function is called from the interrupt context when there is a ring
3468 * event for the fcp ring. The caller does not hold any lock.
3469 * The function processes each response iocb in the response ring until it
3470 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3471 * LE bit set. The function will call the completion handler of the command iocb
3472 * if the response iocb indicates a completion for a command iocb or it is
3473 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3474 * function if this is an unsolicited iocb.
3475 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3476 * to check it explicitly.
3477 */
3478 int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t mask)3479 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3480 struct lpfc_sli_ring *pring, uint32_t mask)
3481 {
3482 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3483 IOCB_t *irsp = NULL;
3484 IOCB_t *entry = NULL;
3485 struct lpfc_iocbq *cmdiocbq = NULL;
3486 struct lpfc_iocbq rspiocbq;
3487 uint32_t status;
3488 uint32_t portRspPut, portRspMax;
3489 int rc = 1;
3490 lpfc_iocb_type type;
3491 unsigned long iflag;
3492 uint32_t rsp_cmpl = 0;
3493
3494 spin_lock_irqsave(&phba->hbalock, iflag);
3495 pring->stats.iocb_event++;
3496
3497 /*
3498 * The next available response entry should never exceed the maximum
3499 * entries. If it does, treat it as an adapter hardware error.
3500 */
3501 portRspMax = pring->sli.sli3.numRiocb;
3502 portRspPut = le32_to_cpu(pgp->rspPutInx);
3503 if (unlikely(portRspPut >= portRspMax)) {
3504 lpfc_sli_rsp_pointers_error(phba, pring);
3505 spin_unlock_irqrestore(&phba->hbalock, iflag);
3506 return 1;
3507 }
3508 if (phba->fcp_ring_in_use) {
3509 spin_unlock_irqrestore(&phba->hbalock, iflag);
3510 return 1;
3511 } else
3512 phba->fcp_ring_in_use = 1;
3513
3514 rmb();
3515 while (pring->sli.sli3.rspidx != portRspPut) {
3516 /*
3517 * Fetch an entry off the ring and copy it into a local data
3518 * structure. The copy involves a byte-swap since the
3519 * network byte order and pci byte orders are different.
3520 */
3521 entry = lpfc_resp_iocb(phba, pring);
3522 phba->last_completion_time = jiffies;
3523
3524 if (++pring->sli.sli3.rspidx >= portRspMax)
3525 pring->sli.sli3.rspidx = 0;
3526
3527 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3528 (uint32_t *) &rspiocbq.iocb,
3529 phba->iocb_rsp_size);
3530 INIT_LIST_HEAD(&(rspiocbq.list));
3531 irsp = &rspiocbq.iocb;
3532
3533 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3534 pring->stats.iocb_rsp++;
3535 rsp_cmpl++;
3536
3537 if (unlikely(irsp->ulpStatus)) {
3538 /*
3539 * If resource errors reported from HBA, reduce
3540 * queuedepths of the SCSI device.
3541 */
3542 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3543 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3544 IOERR_NO_RESOURCES)) {
3545 spin_unlock_irqrestore(&phba->hbalock, iflag);
3546 phba->lpfc_rampdown_queue_depth(phba);
3547 spin_lock_irqsave(&phba->hbalock, iflag);
3548 }
3549
3550 /* Rsp ring <ringno> error: IOCB */
3551 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3552 "0336 Rsp Ring %d error: IOCB Data: "
3553 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3554 pring->ringno,
3555 irsp->un.ulpWord[0],
3556 irsp->un.ulpWord[1],
3557 irsp->un.ulpWord[2],
3558 irsp->un.ulpWord[3],
3559 irsp->un.ulpWord[4],
3560 irsp->un.ulpWord[5],
3561 *(uint32_t *)&irsp->un1,
3562 *((uint32_t *)&irsp->un1 + 1));
3563 }
3564
3565 switch (type) {
3566 case LPFC_ABORT_IOCB:
3567 case LPFC_SOL_IOCB:
3568 /*
3569 * Idle exchange closed via ABTS from port. No iocb
3570 * resources need to be recovered.
3571 */
3572 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3573 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3574 "0333 IOCB cmd 0x%x"
3575 " processed. Skipping"
3576 " completion\n",
3577 irsp->ulpCommand);
3578 break;
3579 }
3580
3581 spin_unlock_irqrestore(&phba->hbalock, iflag);
3582 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3583 &rspiocbq);
3584 spin_lock_irqsave(&phba->hbalock, iflag);
3585 if (unlikely(!cmdiocbq))
3586 break;
3587 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3588 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3589 if (cmdiocbq->iocb_cmpl) {
3590 spin_unlock_irqrestore(&phba->hbalock, iflag);
3591 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3592 &rspiocbq);
3593 spin_lock_irqsave(&phba->hbalock, iflag);
3594 }
3595 break;
3596 case LPFC_UNSOL_IOCB:
3597 spin_unlock_irqrestore(&phba->hbalock, iflag);
3598 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3599 spin_lock_irqsave(&phba->hbalock, iflag);
3600 break;
3601 default:
3602 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3603 char adaptermsg[LPFC_MAX_ADPTMSG];
3604 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3605 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3606 MAX_MSG_DATA);
3607 dev_warn(&((phba->pcidev)->dev),
3608 "lpfc%d: %s\n",
3609 phba->brd_no, adaptermsg);
3610 } else {
3611 /* Unknown IOCB command */
3612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3613 "0334 Unknown IOCB command "
3614 "Data: x%x, x%x x%x x%x x%x\n",
3615 type, irsp->ulpCommand,
3616 irsp->ulpStatus,
3617 irsp->ulpIoTag,
3618 irsp->ulpContext);
3619 }
3620 break;
3621 }
3622
3623 /*
3624 * The response IOCB has been processed. Update the ring
3625 * pointer in SLIM. If the port response put pointer has not
3626 * been updated, sync the pgp->rspPutInx and fetch the new port
3627 * response put pointer.
3628 */
3629 writel(pring->sli.sli3.rspidx,
3630 &phba->host_gp[pring->ringno].rspGetInx);
3631
3632 if (pring->sli.sli3.rspidx == portRspPut)
3633 portRspPut = le32_to_cpu(pgp->rspPutInx);
3634 }
3635
3636 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3637 pring->stats.iocb_rsp_full++;
3638 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3639 writel(status, phba->CAregaddr);
3640 readl(phba->CAregaddr);
3641 }
3642 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3643 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3644 pring->stats.iocb_cmd_empty++;
3645
3646 /* Force update of the local copy of cmdGetInx */
3647 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3648 lpfc_sli_resume_iocb(phba, pring);
3649
3650 if ((pring->lpfc_sli_cmd_available))
3651 (pring->lpfc_sli_cmd_available) (phba, pring);
3652
3653 }
3654
3655 phba->fcp_ring_in_use = 0;
3656 spin_unlock_irqrestore(&phba->hbalock, iflag);
3657 return rc;
3658 }
3659
3660 /**
3661 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3662 * @phba: Pointer to HBA context object.
3663 * @pring: Pointer to driver SLI ring object.
3664 * @rspiocbp: Pointer to driver response IOCB object.
3665 *
3666 * This function is called from the worker thread when there is a slow-path
3667 * response IOCB to process. This function chains all the response iocbs until
3668 * seeing the iocb with the LE bit set. The function will call
3669 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3670 * completion of a command iocb. The function will call the
3671 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3672 * The function frees the resources or calls the completion handler if this
3673 * iocb is an abort completion. The function returns NULL when the response
3674 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3675 * this function shall chain the iocb on to the iocb_continueq and return the
3676 * response iocb passed in.
3677 **/
3678 static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * rspiocbp)3679 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3680 struct lpfc_iocbq *rspiocbp)
3681 {
3682 struct lpfc_iocbq *saveq;
3683 struct lpfc_iocbq *cmdiocbp;
3684 struct lpfc_iocbq *next_iocb;
3685 IOCB_t *irsp = NULL;
3686 uint32_t free_saveq;
3687 uint8_t iocb_cmd_type;
3688 lpfc_iocb_type type;
3689 unsigned long iflag;
3690 int rc;
3691
3692 spin_lock_irqsave(&phba->hbalock, iflag);
3693 /* First add the response iocb to the countinueq list */
3694 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3695 pring->iocb_continueq_cnt++;
3696
3697 /* Now, determine whether the list is completed for processing */
3698 irsp = &rspiocbp->iocb;
3699 if (irsp->ulpLe) {
3700 /*
3701 * By default, the driver expects to free all resources
3702 * associated with this iocb completion.
3703 */
3704 free_saveq = 1;
3705 saveq = list_get_first(&pring->iocb_continueq,
3706 struct lpfc_iocbq, list);
3707 irsp = &(saveq->iocb);
3708 list_del_init(&pring->iocb_continueq);
3709 pring->iocb_continueq_cnt = 0;
3710
3711 pring->stats.iocb_rsp++;
3712
3713 /*
3714 * If resource errors reported from HBA, reduce
3715 * queuedepths of the SCSI device.
3716 */
3717 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3718 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3719 IOERR_NO_RESOURCES)) {
3720 spin_unlock_irqrestore(&phba->hbalock, iflag);
3721 phba->lpfc_rampdown_queue_depth(phba);
3722 spin_lock_irqsave(&phba->hbalock, iflag);
3723 }
3724
3725 if (irsp->ulpStatus) {
3726 /* Rsp ring <ringno> error: IOCB */
3727 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3728 "0328 Rsp Ring %d error: "
3729 "IOCB Data: "
3730 "x%x x%x x%x x%x "
3731 "x%x x%x x%x x%x "
3732 "x%x x%x x%x x%x "
3733 "x%x x%x x%x x%x\n",
3734 pring->ringno,
3735 irsp->un.ulpWord[0],
3736 irsp->un.ulpWord[1],
3737 irsp->un.ulpWord[2],
3738 irsp->un.ulpWord[3],
3739 irsp->un.ulpWord[4],
3740 irsp->un.ulpWord[5],
3741 *(((uint32_t *) irsp) + 6),
3742 *(((uint32_t *) irsp) + 7),
3743 *(((uint32_t *) irsp) + 8),
3744 *(((uint32_t *) irsp) + 9),
3745 *(((uint32_t *) irsp) + 10),
3746 *(((uint32_t *) irsp) + 11),
3747 *(((uint32_t *) irsp) + 12),
3748 *(((uint32_t *) irsp) + 13),
3749 *(((uint32_t *) irsp) + 14),
3750 *(((uint32_t *) irsp) + 15));
3751 }
3752
3753 /*
3754 * Fetch the IOCB command type and call the correct completion
3755 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3756 * get freed back to the lpfc_iocb_list by the discovery
3757 * kernel thread.
3758 */
3759 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3760 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3761 switch (type) {
3762 case LPFC_SOL_IOCB:
3763 spin_unlock_irqrestore(&phba->hbalock, iflag);
3764 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3765 spin_lock_irqsave(&phba->hbalock, iflag);
3766 break;
3767
3768 case LPFC_UNSOL_IOCB:
3769 spin_unlock_irqrestore(&phba->hbalock, iflag);
3770 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3771 spin_lock_irqsave(&phba->hbalock, iflag);
3772 if (!rc)
3773 free_saveq = 0;
3774 break;
3775
3776 case LPFC_ABORT_IOCB:
3777 cmdiocbp = NULL;
3778 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3779 spin_unlock_irqrestore(&phba->hbalock, iflag);
3780 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3781 saveq);
3782 spin_lock_irqsave(&phba->hbalock, iflag);
3783 }
3784 if (cmdiocbp) {
3785 /* Call the specified completion routine */
3786 if (cmdiocbp->iocb_cmpl) {
3787 spin_unlock_irqrestore(&phba->hbalock,
3788 iflag);
3789 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3790 saveq);
3791 spin_lock_irqsave(&phba->hbalock,
3792 iflag);
3793 } else
3794 __lpfc_sli_release_iocbq(phba,
3795 cmdiocbp);
3796 }
3797 break;
3798
3799 case LPFC_UNKNOWN_IOCB:
3800 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3801 char adaptermsg[LPFC_MAX_ADPTMSG];
3802 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3803 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3804 MAX_MSG_DATA);
3805 dev_warn(&((phba->pcidev)->dev),
3806 "lpfc%d: %s\n",
3807 phba->brd_no, adaptermsg);
3808 } else {
3809 /* Unknown IOCB command */
3810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3811 "0335 Unknown IOCB "
3812 "command Data: x%x "
3813 "x%x x%x x%x\n",
3814 irsp->ulpCommand,
3815 irsp->ulpStatus,
3816 irsp->ulpIoTag,
3817 irsp->ulpContext);
3818 }
3819 break;
3820 }
3821
3822 if (free_saveq) {
3823 list_for_each_entry_safe(rspiocbp, next_iocb,
3824 &saveq->list, list) {
3825 list_del_init(&rspiocbp->list);
3826 __lpfc_sli_release_iocbq(phba, rspiocbp);
3827 }
3828 __lpfc_sli_release_iocbq(phba, saveq);
3829 }
3830 rspiocbp = NULL;
3831 }
3832 spin_unlock_irqrestore(&phba->hbalock, iflag);
3833 return rspiocbp;
3834 }
3835
3836 /**
3837 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3838 * @phba: Pointer to HBA context object.
3839 * @pring: Pointer to driver SLI ring object.
3840 * @mask: Host attention register mask for this ring.
3841 *
3842 * This routine wraps the actual slow_ring event process routine from the
3843 * API jump table function pointer from the lpfc_hba struct.
3844 **/
3845 void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t mask)3846 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3847 struct lpfc_sli_ring *pring, uint32_t mask)
3848 {
3849 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3850 }
3851
3852 /**
3853 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3854 * @phba: Pointer to HBA context object.
3855 * @pring: Pointer to driver SLI ring object.
3856 * @mask: Host attention register mask for this ring.
3857 *
3858 * This function is called from the worker thread when there is a ring event
3859 * for non-fcp rings. The caller does not hold any lock. The function will
3860 * remove each response iocb in the response ring and calls the handle
3861 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3862 **/
3863 static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t mask)3864 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3865 struct lpfc_sli_ring *pring, uint32_t mask)
3866 {
3867 struct lpfc_pgp *pgp;
3868 IOCB_t *entry;
3869 IOCB_t *irsp = NULL;
3870 struct lpfc_iocbq *rspiocbp = NULL;
3871 uint32_t portRspPut, portRspMax;
3872 unsigned long iflag;
3873 uint32_t status;
3874
3875 pgp = &phba->port_gp[pring->ringno];
3876 spin_lock_irqsave(&phba->hbalock, iflag);
3877 pring->stats.iocb_event++;
3878
3879 /*
3880 * The next available response entry should never exceed the maximum
3881 * entries. If it does, treat it as an adapter hardware error.
3882 */
3883 portRspMax = pring->sli.sli3.numRiocb;
3884 portRspPut = le32_to_cpu(pgp->rspPutInx);
3885 if (portRspPut >= portRspMax) {
3886 /*
3887 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3888 * rsp ring <portRspMax>
3889 */
3890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3891 "0303 Ring %d handler: portRspPut %d "
3892 "is bigger than rsp ring %d\n",
3893 pring->ringno, portRspPut, portRspMax);
3894
3895 phba->link_state = LPFC_HBA_ERROR;
3896 spin_unlock_irqrestore(&phba->hbalock, iflag);
3897
3898 phba->work_hs = HS_FFER3;
3899 lpfc_handle_eratt(phba);
3900
3901 return;
3902 }
3903
3904 rmb();
3905 while (pring->sli.sli3.rspidx != portRspPut) {
3906 /*
3907 * Build a completion list and call the appropriate handler.
3908 * The process is to get the next available response iocb, get
3909 * a free iocb from the list, copy the response data into the
3910 * free iocb, insert to the continuation list, and update the
3911 * next response index to slim. This process makes response
3912 * iocb's in the ring available to DMA as fast as possible but
3913 * pays a penalty for a copy operation. Since the iocb is
3914 * only 32 bytes, this penalty is considered small relative to
3915 * the PCI reads for register values and a slim write. When
3916 * the ulpLe field is set, the entire Command has been
3917 * received.
3918 */
3919 entry = lpfc_resp_iocb(phba, pring);
3920
3921 phba->last_completion_time = jiffies;
3922 rspiocbp = __lpfc_sli_get_iocbq(phba);
3923 if (rspiocbp == NULL) {
3924 printk(KERN_ERR "%s: out of buffers! Failing "
3925 "completion.\n", __func__);
3926 break;
3927 }
3928
3929 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3930 phba->iocb_rsp_size);
3931 irsp = &rspiocbp->iocb;
3932
3933 if (++pring->sli.sli3.rspidx >= portRspMax)
3934 pring->sli.sli3.rspidx = 0;
3935
3936 if (pring->ringno == LPFC_ELS_RING) {
3937 lpfc_debugfs_slow_ring_trc(phba,
3938 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3939 *(((uint32_t *) irsp) + 4),
3940 *(((uint32_t *) irsp) + 6),
3941 *(((uint32_t *) irsp) + 7));
3942 }
3943
3944 writel(pring->sli.sli3.rspidx,
3945 &phba->host_gp[pring->ringno].rspGetInx);
3946
3947 spin_unlock_irqrestore(&phba->hbalock, iflag);
3948 /* Handle the response IOCB */
3949 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3950 spin_lock_irqsave(&phba->hbalock, iflag);
3951
3952 /*
3953 * If the port response put pointer has not been updated, sync
3954 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3955 * response put pointer.
3956 */
3957 if (pring->sli.sli3.rspidx == portRspPut) {
3958 portRspPut = le32_to_cpu(pgp->rspPutInx);
3959 }
3960 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3961
3962 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3963 /* At least one response entry has been freed */
3964 pring->stats.iocb_rsp_full++;
3965 /* SET RxRE_RSP in Chip Att register */
3966 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3967 writel(status, phba->CAregaddr);
3968 readl(phba->CAregaddr); /* flush */
3969 }
3970 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3971 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3972 pring->stats.iocb_cmd_empty++;
3973
3974 /* Force update of the local copy of cmdGetInx */
3975 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3976 lpfc_sli_resume_iocb(phba, pring);
3977
3978 if ((pring->lpfc_sli_cmd_available))
3979 (pring->lpfc_sli_cmd_available) (phba, pring);
3980
3981 }
3982
3983 spin_unlock_irqrestore(&phba->hbalock, iflag);
3984 return;
3985 }
3986
3987 /**
3988 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3989 * @phba: Pointer to HBA context object.
3990 * @pring: Pointer to driver SLI ring object.
3991 * @mask: Host attention register mask for this ring.
3992 *
3993 * This function is called from the worker thread when there is a pending
3994 * ELS response iocb on the driver internal slow-path response iocb worker
3995 * queue. The caller does not hold any lock. The function will remove each
3996 * response iocb from the response worker queue and calls the handle
3997 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3998 **/
3999 static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t mask)4000 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4001 struct lpfc_sli_ring *pring, uint32_t mask)
4002 {
4003 struct lpfc_iocbq *irspiocbq;
4004 struct hbq_dmabuf *dmabuf;
4005 struct lpfc_cq_event *cq_event;
4006 unsigned long iflag;
4007 int count = 0;
4008
4009 spin_lock_irqsave(&phba->hbalock, iflag);
4010 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4011 spin_unlock_irqrestore(&phba->hbalock, iflag);
4012 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4013 /* Get the response iocb from the head of work queue */
4014 spin_lock_irqsave(&phba->hbalock, iflag);
4015 list_remove_head(&phba->sli4_hba.sp_queue_event,
4016 cq_event, struct lpfc_cq_event, list);
4017 spin_unlock_irqrestore(&phba->hbalock, iflag);
4018
4019 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4020 case CQE_CODE_COMPL_WQE:
4021 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4022 cq_event);
4023 /* Translate ELS WCQE to response IOCBQ */
4024 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4025 irspiocbq);
4026 if (irspiocbq)
4027 lpfc_sli_sp_handle_rspiocb(phba, pring,
4028 irspiocbq);
4029 count++;
4030 break;
4031 case CQE_CODE_RECEIVE:
4032 case CQE_CODE_RECEIVE_V1:
4033 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4034 cq_event);
4035 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4036 count++;
4037 break;
4038 default:
4039 break;
4040 }
4041
4042 /* Limit the number of events to 64 to avoid soft lockups */
4043 if (count == 64)
4044 break;
4045 }
4046 }
4047
4048 /**
4049 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4050 * @phba: Pointer to HBA context object.
4051 * @pring: Pointer to driver SLI ring object.
4052 *
4053 * This function aborts all iocbs in the given ring and frees all the iocb
4054 * objects in txq. This function issues an abort iocb for all the iocb commands
4055 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4056 * the return of this function. The caller is not required to hold any locks.
4057 **/
4058 void
lpfc_sli_abort_iocb_ring(struct lpfc_hba * phba,struct lpfc_sli_ring * pring)4059 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4060 {
4061 LIST_HEAD(completions);
4062 struct lpfc_iocbq *iocb, *next_iocb;
4063
4064 if (pring->ringno == LPFC_ELS_RING) {
4065 lpfc_fabric_abort_hba(phba);
4066 }
4067
4068 /* Error everything on txq and txcmplq
4069 * First do the txq.
4070 */
4071 if (phba->sli_rev >= LPFC_SLI_REV4) {
4072 spin_lock_irq(&pring->ring_lock);
4073 list_splice_init(&pring->txq, &completions);
4074 pring->txq_cnt = 0;
4075 spin_unlock_irq(&pring->ring_lock);
4076
4077 spin_lock_irq(&phba->hbalock);
4078 /* Next issue ABTS for everything on the txcmplq */
4079 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4080 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4081 spin_unlock_irq(&phba->hbalock);
4082 } else {
4083 spin_lock_irq(&phba->hbalock);
4084 list_splice_init(&pring->txq, &completions);
4085 pring->txq_cnt = 0;
4086
4087 /* Next issue ABTS for everything on the txcmplq */
4088 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4089 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4090 spin_unlock_irq(&phba->hbalock);
4091 }
4092
4093 /* Cancel all the IOCBs from the completions list */
4094 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4095 IOERR_SLI_ABORTED);
4096 }
4097
4098 /**
4099 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4100 * @phba: Pointer to HBA context object.
4101 *
4102 * This function aborts all iocbs in FCP rings and frees all the iocb
4103 * objects in txq. This function issues an abort iocb for all the iocb commands
4104 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4105 * the return of this function. The caller is not required to hold any locks.
4106 **/
4107 void
lpfc_sli_abort_fcp_rings(struct lpfc_hba * phba)4108 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4109 {
4110 struct lpfc_sli *psli = &phba->sli;
4111 struct lpfc_sli_ring *pring;
4112 uint32_t i;
4113
4114 /* Look on all the FCP Rings for the iotag */
4115 if (phba->sli_rev >= LPFC_SLI_REV4) {
4116 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4117 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4118 lpfc_sli_abort_iocb_ring(phba, pring);
4119 }
4120 } else {
4121 pring = &psli->sli3_ring[LPFC_FCP_RING];
4122 lpfc_sli_abort_iocb_ring(phba, pring);
4123 }
4124 }
4125
4126 /**
4127 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4128 * @phba: Pointer to HBA context object.
4129 *
4130 * This function flushes all iocbs in the IO ring and frees all the iocb
4131 * objects in txq and txcmplq. This function will not issue abort iocbs
4132 * for all the iocb commands in txcmplq, they will just be returned with
4133 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4134 * slot has been permanently disabled.
4135 **/
4136 void
lpfc_sli_flush_io_rings(struct lpfc_hba * phba)4137 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4138 {
4139 LIST_HEAD(txq);
4140 LIST_HEAD(txcmplq);
4141 struct lpfc_sli *psli = &phba->sli;
4142 struct lpfc_sli_ring *pring;
4143 uint32_t i;
4144 struct lpfc_iocbq *piocb, *next_iocb;
4145
4146 spin_lock_irq(&phba->hbalock);
4147 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4148 !phba->sli4_hba.hdwq) {
4149 spin_unlock_irq(&phba->hbalock);
4150 return;
4151 }
4152 /* Indicate the I/O queues are flushed */
4153 phba->hba_flag |= HBA_IOQ_FLUSH;
4154 spin_unlock_irq(&phba->hbalock);
4155
4156 /* Look on all the FCP Rings for the iotag */
4157 if (phba->sli_rev >= LPFC_SLI_REV4) {
4158 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4159 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4160
4161 spin_lock_irq(&pring->ring_lock);
4162 /* Retrieve everything on txq */
4163 list_splice_init(&pring->txq, &txq);
4164 list_for_each_entry_safe(piocb, next_iocb,
4165 &pring->txcmplq, list)
4166 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4167 /* Retrieve everything on the txcmplq */
4168 list_splice_init(&pring->txcmplq, &txcmplq);
4169 pring->txq_cnt = 0;
4170 pring->txcmplq_cnt = 0;
4171 spin_unlock_irq(&pring->ring_lock);
4172
4173 /* Flush the txq */
4174 lpfc_sli_cancel_iocbs(phba, &txq,
4175 IOSTAT_LOCAL_REJECT,
4176 IOERR_SLI_DOWN);
4177 /* Flush the txcmpq */
4178 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4179 IOSTAT_LOCAL_REJECT,
4180 IOERR_SLI_DOWN);
4181 }
4182 } else {
4183 pring = &psli->sli3_ring[LPFC_FCP_RING];
4184
4185 spin_lock_irq(&phba->hbalock);
4186 /* Retrieve everything on txq */
4187 list_splice_init(&pring->txq, &txq);
4188 list_for_each_entry_safe(piocb, next_iocb,
4189 &pring->txcmplq, list)
4190 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4191 /* Retrieve everything on the txcmplq */
4192 list_splice_init(&pring->txcmplq, &txcmplq);
4193 pring->txq_cnt = 0;
4194 pring->txcmplq_cnt = 0;
4195 spin_unlock_irq(&phba->hbalock);
4196
4197 /* Flush the txq */
4198 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4199 IOERR_SLI_DOWN);
4200 /* Flush the txcmpq */
4201 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4202 IOERR_SLI_DOWN);
4203 }
4204 }
4205
4206 /**
4207 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4208 * @phba: Pointer to HBA context object.
4209 * @mask: Bit mask to be checked.
4210 *
4211 * This function reads the host status register and compares
4212 * with the provided bit mask to check if HBA completed
4213 * the restart. This function will wait in a loop for the
4214 * HBA to complete restart. If the HBA does not restart within
4215 * 15 iterations, the function will reset the HBA again. The
4216 * function returns 1 when HBA fail to restart otherwise returns
4217 * zero.
4218 **/
4219 static int
lpfc_sli_brdready_s3(struct lpfc_hba * phba,uint32_t mask)4220 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4221 {
4222 uint32_t status;
4223 int i = 0;
4224 int retval = 0;
4225
4226 /* Read the HBA Host Status Register */
4227 if (lpfc_readl(phba->HSregaddr, &status))
4228 return 1;
4229
4230 /*
4231 * Check status register every 100ms for 5 retries, then every
4232 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4233 * every 2.5 sec for 4.
4234 * Break our of the loop if errors occurred during init.
4235 */
4236 while (((status & mask) != mask) &&
4237 !(status & HS_FFERM) &&
4238 i++ < 20) {
4239
4240 if (i <= 5)
4241 msleep(10);
4242 else if (i <= 10)
4243 msleep(500);
4244 else
4245 msleep(2500);
4246
4247 if (i == 15) {
4248 /* Do post */
4249 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4250 lpfc_sli_brdrestart(phba);
4251 }
4252 /* Read the HBA Host Status Register */
4253 if (lpfc_readl(phba->HSregaddr, &status)) {
4254 retval = 1;
4255 break;
4256 }
4257 }
4258
4259 /* Check to see if any errors occurred during init */
4260 if ((status & HS_FFERM) || (i >= 20)) {
4261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4262 "2751 Adapter failed to restart, "
4263 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4264 status,
4265 readl(phba->MBslimaddr + 0xa8),
4266 readl(phba->MBslimaddr + 0xac));
4267 phba->link_state = LPFC_HBA_ERROR;
4268 retval = 1;
4269 }
4270
4271 return retval;
4272 }
4273
4274 /**
4275 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4276 * @phba: Pointer to HBA context object.
4277 * @mask: Bit mask to be checked.
4278 *
4279 * This function checks the host status register to check if HBA is
4280 * ready. This function will wait in a loop for the HBA to be ready
4281 * If the HBA is not ready , the function will will reset the HBA PCI
4282 * function again. The function returns 1 when HBA fail to be ready
4283 * otherwise returns zero.
4284 **/
4285 static int
lpfc_sli_brdready_s4(struct lpfc_hba * phba,uint32_t mask)4286 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4287 {
4288 uint32_t status;
4289 int retval = 0;
4290
4291 /* Read the HBA Host Status Register */
4292 status = lpfc_sli4_post_status_check(phba);
4293
4294 if (status) {
4295 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4296 lpfc_sli_brdrestart(phba);
4297 status = lpfc_sli4_post_status_check(phba);
4298 }
4299
4300 /* Check to see if any errors occurred during init */
4301 if (status) {
4302 phba->link_state = LPFC_HBA_ERROR;
4303 retval = 1;
4304 } else
4305 phba->sli4_hba.intr_enable = 0;
4306
4307 return retval;
4308 }
4309
4310 /**
4311 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
4312 * @phba: Pointer to HBA context object.
4313 * @mask: Bit mask to be checked.
4314 *
4315 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
4316 * from the API jump table function pointer from the lpfc_hba struct.
4317 **/
4318 int
lpfc_sli_brdready(struct lpfc_hba * phba,uint32_t mask)4319 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4320 {
4321 return phba->lpfc_sli_brdready(phba, mask);
4322 }
4323
4324 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4325
4326 /**
4327 * lpfc_reset_barrier - Make HBA ready for HBA reset
4328 * @phba: Pointer to HBA context object.
4329 *
4330 * This function is called before resetting an HBA. This function is called
4331 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4332 **/
lpfc_reset_barrier(struct lpfc_hba * phba)4333 void lpfc_reset_barrier(struct lpfc_hba *phba)
4334 {
4335 uint32_t __iomem *resp_buf;
4336 uint32_t __iomem *mbox_buf;
4337 volatile uint32_t mbox;
4338 uint32_t hc_copy, ha_copy, resp_data;
4339 int i;
4340 uint8_t hdrtype;
4341
4342 lockdep_assert_held(&phba->hbalock);
4343
4344 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4345 if (hdrtype != 0x80 ||
4346 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4347 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4348 return;
4349
4350 /*
4351 * Tell the other part of the chip to suspend temporarily all
4352 * its DMA activity.
4353 */
4354 resp_buf = phba->MBslimaddr;
4355
4356 /* Disable the error attention */
4357 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4358 return;
4359 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4360 readl(phba->HCregaddr); /* flush */
4361 phba->link_flag |= LS_IGNORE_ERATT;
4362
4363 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4364 return;
4365 if (ha_copy & HA_ERATT) {
4366 /* Clear Chip error bit */
4367 writel(HA_ERATT, phba->HAregaddr);
4368 phba->pport->stopped = 1;
4369 }
4370
4371 mbox = 0;
4372 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4373 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4374
4375 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4376 mbox_buf = phba->MBslimaddr;
4377 writel(mbox, mbox_buf);
4378
4379 for (i = 0; i < 50; i++) {
4380 if (lpfc_readl((resp_buf + 1), &resp_data))
4381 return;
4382 if (resp_data != ~(BARRIER_TEST_PATTERN))
4383 mdelay(1);
4384 else
4385 break;
4386 }
4387 resp_data = 0;
4388 if (lpfc_readl((resp_buf + 1), &resp_data))
4389 return;
4390 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4391 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4392 phba->pport->stopped)
4393 goto restore_hc;
4394 else
4395 goto clear_errat;
4396 }
4397
4398 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4399 resp_data = 0;
4400 for (i = 0; i < 500; i++) {
4401 if (lpfc_readl(resp_buf, &resp_data))
4402 return;
4403 if (resp_data != mbox)
4404 mdelay(1);
4405 else
4406 break;
4407 }
4408
4409 clear_errat:
4410
4411 while (++i < 500) {
4412 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4413 return;
4414 if (!(ha_copy & HA_ERATT))
4415 mdelay(1);
4416 else
4417 break;
4418 }
4419
4420 if (readl(phba->HAregaddr) & HA_ERATT) {
4421 writel(HA_ERATT, phba->HAregaddr);
4422 phba->pport->stopped = 1;
4423 }
4424
4425 restore_hc:
4426 phba->link_flag &= ~LS_IGNORE_ERATT;
4427 writel(hc_copy, phba->HCregaddr);
4428 readl(phba->HCregaddr); /* flush */
4429 }
4430
4431 /**
4432 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4433 * @phba: Pointer to HBA context object.
4434 *
4435 * This function issues a kill_board mailbox command and waits for
4436 * the error attention interrupt. This function is called for stopping
4437 * the firmware processing. The caller is not required to hold any
4438 * locks. This function calls lpfc_hba_down_post function to free
4439 * any pending commands after the kill. The function will return 1 when it
4440 * fails to kill the board else will return 0.
4441 **/
4442 int
lpfc_sli_brdkill(struct lpfc_hba * phba)4443 lpfc_sli_brdkill(struct lpfc_hba *phba)
4444 {
4445 struct lpfc_sli *psli;
4446 LPFC_MBOXQ_t *pmb;
4447 uint32_t status;
4448 uint32_t ha_copy;
4449 int retval;
4450 int i = 0;
4451
4452 psli = &phba->sli;
4453
4454 /* Kill HBA */
4455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4456 "0329 Kill HBA Data: x%x x%x\n",
4457 phba->pport->port_state, psli->sli_flag);
4458
4459 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4460 if (!pmb)
4461 return 1;
4462
4463 /* Disable the error attention */
4464 spin_lock_irq(&phba->hbalock);
4465 if (lpfc_readl(phba->HCregaddr, &status)) {
4466 spin_unlock_irq(&phba->hbalock);
4467 mempool_free(pmb, phba->mbox_mem_pool);
4468 return 1;
4469 }
4470 status &= ~HC_ERINT_ENA;
4471 writel(status, phba->HCregaddr);
4472 readl(phba->HCregaddr); /* flush */
4473 phba->link_flag |= LS_IGNORE_ERATT;
4474 spin_unlock_irq(&phba->hbalock);
4475
4476 lpfc_kill_board(phba, pmb);
4477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4478 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4479
4480 if (retval != MBX_SUCCESS) {
4481 if (retval != MBX_BUSY)
4482 mempool_free(pmb, phba->mbox_mem_pool);
4483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4484 "2752 KILL_BOARD command failed retval %d\n",
4485 retval);
4486 spin_lock_irq(&phba->hbalock);
4487 phba->link_flag &= ~LS_IGNORE_ERATT;
4488 spin_unlock_irq(&phba->hbalock);
4489 return 1;
4490 }
4491
4492 spin_lock_irq(&phba->hbalock);
4493 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4494 spin_unlock_irq(&phba->hbalock);
4495
4496 mempool_free(pmb, phba->mbox_mem_pool);
4497
4498 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4499 * attention every 100ms for 3 seconds. If we don't get ERATT after
4500 * 3 seconds we still set HBA_ERROR state because the status of the
4501 * board is now undefined.
4502 */
4503 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4504 return 1;
4505 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4506 mdelay(100);
4507 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4508 return 1;
4509 }
4510
4511 del_timer_sync(&psli->mbox_tmo);
4512 if (ha_copy & HA_ERATT) {
4513 writel(HA_ERATT, phba->HAregaddr);
4514 phba->pport->stopped = 1;
4515 }
4516 spin_lock_irq(&phba->hbalock);
4517 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4518 psli->mbox_active = NULL;
4519 phba->link_flag &= ~LS_IGNORE_ERATT;
4520 spin_unlock_irq(&phba->hbalock);
4521
4522 lpfc_hba_down_post(phba);
4523 phba->link_state = LPFC_HBA_ERROR;
4524
4525 return ha_copy & HA_ERATT ? 0 : 1;
4526 }
4527
4528 /**
4529 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4530 * @phba: Pointer to HBA context object.
4531 *
4532 * This function resets the HBA by writing HC_INITFF to the control
4533 * register. After the HBA resets, this function resets all the iocb ring
4534 * indices. This function disables PCI layer parity checking during
4535 * the reset.
4536 * This function returns 0 always.
4537 * The caller is not required to hold any locks.
4538 **/
4539 int
lpfc_sli_brdreset(struct lpfc_hba * phba)4540 lpfc_sli_brdreset(struct lpfc_hba *phba)
4541 {
4542 struct lpfc_sli *psli;
4543 struct lpfc_sli_ring *pring;
4544 uint16_t cfg_value;
4545 int i;
4546
4547 psli = &phba->sli;
4548
4549 /* Reset HBA */
4550 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4551 "0325 Reset HBA Data: x%x x%x\n",
4552 (phba->pport) ? phba->pport->port_state : 0,
4553 psli->sli_flag);
4554
4555 /* perform board reset */
4556 phba->fc_eventTag = 0;
4557 phba->link_events = 0;
4558 if (phba->pport) {
4559 phba->pport->fc_myDID = 0;
4560 phba->pport->fc_prevDID = 0;
4561 }
4562
4563 /* Turn off parity checking and serr during the physical reset */
4564 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4565 return -EIO;
4566
4567 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4568 (cfg_value &
4569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4570
4571 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4572
4573 /* Now toggle INITFF bit in the Host Control Register */
4574 writel(HC_INITFF, phba->HCregaddr);
4575 mdelay(1);
4576 readl(phba->HCregaddr); /* flush */
4577 writel(0, phba->HCregaddr);
4578 readl(phba->HCregaddr); /* flush */
4579
4580 /* Restore PCI cmd register */
4581 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4582
4583 /* Initialize relevant SLI info */
4584 for (i = 0; i < psli->num_rings; i++) {
4585 pring = &psli->sli3_ring[i];
4586 pring->flag = 0;
4587 pring->sli.sli3.rspidx = 0;
4588 pring->sli.sli3.next_cmdidx = 0;
4589 pring->sli.sli3.local_getidx = 0;
4590 pring->sli.sli3.cmdidx = 0;
4591 pring->missbufcnt = 0;
4592 }
4593
4594 phba->link_state = LPFC_WARM_START;
4595 return 0;
4596 }
4597
4598 /**
4599 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4600 * @phba: Pointer to HBA context object.
4601 *
4602 * This function resets a SLI4 HBA. This function disables PCI layer parity
4603 * checking during resets the device. The caller is not required to hold
4604 * any locks.
4605 *
4606 * This function returns 0 on success else returns negative error code.
4607 **/
4608 int
lpfc_sli4_brdreset(struct lpfc_hba * phba)4609 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4610 {
4611 struct lpfc_sli *psli = &phba->sli;
4612 uint16_t cfg_value;
4613 int rc = 0;
4614
4615 /* Reset HBA */
4616 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4617 "0295 Reset HBA Data: x%x x%x x%x\n",
4618 phba->pport->port_state, psli->sli_flag,
4619 phba->hba_flag);
4620
4621 /* perform board reset */
4622 phba->fc_eventTag = 0;
4623 phba->link_events = 0;
4624 phba->pport->fc_myDID = 0;
4625 phba->pport->fc_prevDID = 0;
4626
4627 spin_lock_irq(&phba->hbalock);
4628 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4629 phba->fcf.fcf_flag = 0;
4630 spin_unlock_irq(&phba->hbalock);
4631
4632 /* Now physically reset the device */
4633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4634 "0389 Performing PCI function reset!\n");
4635
4636 /* Turn off parity checking and serr during the physical reset */
4637 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4639 "3205 PCI read Config failed\n");
4640 return -EIO;
4641 }
4642
4643 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4644 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4645
4646 /* Perform FCoE PCI function reset before freeing queue memory */
4647 rc = lpfc_pci_function_reset(phba);
4648
4649 /* Restore PCI cmd register */
4650 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4651
4652 return rc;
4653 }
4654
4655 /**
4656 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4657 * @phba: Pointer to HBA context object.
4658 *
4659 * This function is called in the SLI initialization code path to
4660 * restart the HBA. The caller is not required to hold any lock.
4661 * This function writes MBX_RESTART mailbox command to the SLIM and
4662 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4663 * function to free any pending commands. The function enables
4664 * POST only during the first initialization. The function returns zero.
4665 * The function does not guarantee completion of MBX_RESTART mailbox
4666 * command before the return of this function.
4667 **/
4668 static int
lpfc_sli_brdrestart_s3(struct lpfc_hba * phba)4669 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4670 {
4671 MAILBOX_t *mb;
4672 struct lpfc_sli *psli;
4673 volatile uint32_t word0;
4674 void __iomem *to_slim;
4675 uint32_t hba_aer_enabled;
4676
4677 spin_lock_irq(&phba->hbalock);
4678
4679 /* Take PCIe device Advanced Error Reporting (AER) state */
4680 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4681
4682 psli = &phba->sli;
4683
4684 /* Restart HBA */
4685 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4686 "0337 Restart HBA Data: x%x x%x\n",
4687 (phba->pport) ? phba->pport->port_state : 0,
4688 psli->sli_flag);
4689
4690 word0 = 0;
4691 mb = (MAILBOX_t *) &word0;
4692 mb->mbxCommand = MBX_RESTART;
4693 mb->mbxHc = 1;
4694
4695 lpfc_reset_barrier(phba);
4696
4697 to_slim = phba->MBslimaddr;
4698 writel(*(uint32_t *) mb, to_slim);
4699 readl(to_slim); /* flush */
4700
4701 /* Only skip post after fc_ffinit is completed */
4702 if (phba->pport && phba->pport->port_state)
4703 word0 = 1; /* This is really setting up word1 */
4704 else
4705 word0 = 0; /* This is really setting up word1 */
4706 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4707 writel(*(uint32_t *) mb, to_slim);
4708 readl(to_slim); /* flush */
4709
4710 lpfc_sli_brdreset(phba);
4711 if (phba->pport)
4712 phba->pport->stopped = 0;
4713 phba->link_state = LPFC_INIT_START;
4714 phba->hba_flag = 0;
4715 spin_unlock_irq(&phba->hbalock);
4716
4717 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4718 psli->stats_start = ktime_get_seconds();
4719
4720 /* Give the INITFF and Post time to settle. */
4721 mdelay(100);
4722
4723 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4724 if (hba_aer_enabled)
4725 pci_disable_pcie_error_reporting(phba->pcidev);
4726
4727 lpfc_hba_down_post(phba);
4728
4729 return 0;
4730 }
4731
4732 /**
4733 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4734 * @phba: Pointer to HBA context object.
4735 *
4736 * This function is called in the SLI initialization code path to restart
4737 * a SLI4 HBA. The caller is not required to hold any lock.
4738 * At the end of the function, it calls lpfc_hba_down_post function to
4739 * free any pending commands.
4740 **/
4741 static int
lpfc_sli_brdrestart_s4(struct lpfc_hba * phba)4742 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4743 {
4744 struct lpfc_sli *psli = &phba->sli;
4745 uint32_t hba_aer_enabled;
4746 int rc;
4747
4748 /* Restart HBA */
4749 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4750 "0296 Restart HBA Data: x%x x%x\n",
4751 phba->pport->port_state, psli->sli_flag);
4752
4753 /* Take PCIe device Advanced Error Reporting (AER) state */
4754 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4755
4756 rc = lpfc_sli4_brdreset(phba);
4757 if (rc) {
4758 phba->link_state = LPFC_HBA_ERROR;
4759 goto hba_down_queue;
4760 }
4761
4762 spin_lock_irq(&phba->hbalock);
4763 phba->pport->stopped = 0;
4764 phba->link_state = LPFC_INIT_START;
4765 phba->hba_flag = 0;
4766 spin_unlock_irq(&phba->hbalock);
4767
4768 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4769 psli->stats_start = ktime_get_seconds();
4770
4771 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4772 if (hba_aer_enabled)
4773 pci_disable_pcie_error_reporting(phba->pcidev);
4774
4775 hba_down_queue:
4776 lpfc_hba_down_post(phba);
4777 lpfc_sli4_queue_destroy(phba);
4778
4779 return rc;
4780 }
4781
4782 /**
4783 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4784 * @phba: Pointer to HBA context object.
4785 *
4786 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4787 * API jump table function pointer from the lpfc_hba struct.
4788 **/
4789 int
lpfc_sli_brdrestart(struct lpfc_hba * phba)4790 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4791 {
4792 return phba->lpfc_sli_brdrestart(phba);
4793 }
4794
4795 /**
4796 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4797 * @phba: Pointer to HBA context object.
4798 *
4799 * This function is called after a HBA restart to wait for successful
4800 * restart of the HBA. Successful restart of the HBA is indicated by
4801 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4802 * iteration, the function will restart the HBA again. The function returns
4803 * zero if HBA successfully restarted else returns negative error code.
4804 **/
4805 int
lpfc_sli_chipset_init(struct lpfc_hba * phba)4806 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4807 {
4808 uint32_t status, i = 0;
4809
4810 /* Read the HBA Host Status Register */
4811 if (lpfc_readl(phba->HSregaddr, &status))
4812 return -EIO;
4813
4814 /* Check status register to see what current state is */
4815 i = 0;
4816 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4817
4818 /* Check every 10ms for 10 retries, then every 100ms for 90
4819 * retries, then every 1 sec for 50 retires for a total of
4820 * ~60 seconds before reset the board again and check every
4821 * 1 sec for 50 retries. The up to 60 seconds before the
4822 * board ready is required by the Falcon FIPS zeroization
4823 * complete, and any reset the board in between shall cause
4824 * restart of zeroization, further delay the board ready.
4825 */
4826 if (i++ >= 200) {
4827 /* Adapter failed to init, timeout, status reg
4828 <status> */
4829 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4830 "0436 Adapter failed to init, "
4831 "timeout, status reg x%x, "
4832 "FW Data: A8 x%x AC x%x\n", status,
4833 readl(phba->MBslimaddr + 0xa8),
4834 readl(phba->MBslimaddr + 0xac));
4835 phba->link_state = LPFC_HBA_ERROR;
4836 return -ETIMEDOUT;
4837 }
4838
4839 /* Check to see if any errors occurred during init */
4840 if (status & HS_FFERM) {
4841 /* ERROR: During chipset initialization */
4842 /* Adapter failed to init, chipset, status reg
4843 <status> */
4844 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4845 "0437 Adapter failed to init, "
4846 "chipset, status reg x%x, "
4847 "FW Data: A8 x%x AC x%x\n", status,
4848 readl(phba->MBslimaddr + 0xa8),
4849 readl(phba->MBslimaddr + 0xac));
4850 phba->link_state = LPFC_HBA_ERROR;
4851 return -EIO;
4852 }
4853
4854 if (i <= 10)
4855 msleep(10);
4856 else if (i <= 100)
4857 msleep(100);
4858 else
4859 msleep(1000);
4860
4861 if (i == 150) {
4862 /* Do post */
4863 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4864 lpfc_sli_brdrestart(phba);
4865 }
4866 /* Read the HBA Host Status Register */
4867 if (lpfc_readl(phba->HSregaddr, &status))
4868 return -EIO;
4869 }
4870
4871 /* Check to see if any errors occurred during init */
4872 if (status & HS_FFERM) {
4873 /* ERROR: During chipset initialization */
4874 /* Adapter failed to init, chipset, status reg <status> */
4875 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4876 "0438 Adapter failed to init, chipset, "
4877 "status reg x%x, "
4878 "FW Data: A8 x%x AC x%x\n", status,
4879 readl(phba->MBslimaddr + 0xa8),
4880 readl(phba->MBslimaddr + 0xac));
4881 phba->link_state = LPFC_HBA_ERROR;
4882 return -EIO;
4883 }
4884
4885 /* Clear all interrupt enable conditions */
4886 writel(0, phba->HCregaddr);
4887 readl(phba->HCregaddr); /* flush */
4888
4889 /* setup host attn register */
4890 writel(0xffffffff, phba->HAregaddr);
4891 readl(phba->HAregaddr); /* flush */
4892 return 0;
4893 }
4894
4895 /**
4896 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4897 *
4898 * This function calculates and returns the number of HBQs required to be
4899 * configured.
4900 **/
4901 int
lpfc_sli_hbq_count(void)4902 lpfc_sli_hbq_count(void)
4903 {
4904 return ARRAY_SIZE(lpfc_hbq_defs);
4905 }
4906
4907 /**
4908 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4909 *
4910 * This function adds the number of hbq entries in every HBQ to get
4911 * the total number of hbq entries required for the HBA and returns
4912 * the total count.
4913 **/
4914 static int
lpfc_sli_hbq_entry_count(void)4915 lpfc_sli_hbq_entry_count(void)
4916 {
4917 int hbq_count = lpfc_sli_hbq_count();
4918 int count = 0;
4919 int i;
4920
4921 for (i = 0; i < hbq_count; ++i)
4922 count += lpfc_hbq_defs[i]->entry_count;
4923 return count;
4924 }
4925
4926 /**
4927 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4928 *
4929 * This function calculates amount of memory required for all hbq entries
4930 * to be configured and returns the total memory required.
4931 **/
4932 int
lpfc_sli_hbq_size(void)4933 lpfc_sli_hbq_size(void)
4934 {
4935 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4936 }
4937
4938 /**
4939 * lpfc_sli_hbq_setup - configure and initialize HBQs
4940 * @phba: Pointer to HBA context object.
4941 *
4942 * This function is called during the SLI initialization to configure
4943 * all the HBQs and post buffers to the HBQ. The caller is not
4944 * required to hold any locks. This function will return zero if successful
4945 * else it will return negative error code.
4946 **/
4947 static int
lpfc_sli_hbq_setup(struct lpfc_hba * phba)4948 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4949 {
4950 int hbq_count = lpfc_sli_hbq_count();
4951 LPFC_MBOXQ_t *pmb;
4952 MAILBOX_t *pmbox;
4953 uint32_t hbqno;
4954 uint32_t hbq_entry_index;
4955
4956 /* Get a Mailbox buffer to setup mailbox
4957 * commands for HBA initialization
4958 */
4959 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4960
4961 if (!pmb)
4962 return -ENOMEM;
4963
4964 pmbox = &pmb->u.mb;
4965
4966 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4967 phba->link_state = LPFC_INIT_MBX_CMDS;
4968 phba->hbq_in_use = 1;
4969
4970 hbq_entry_index = 0;
4971 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4972 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4973 phba->hbqs[hbqno].hbqPutIdx = 0;
4974 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4975 phba->hbqs[hbqno].entry_count =
4976 lpfc_hbq_defs[hbqno]->entry_count;
4977 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4978 hbq_entry_index, pmb);
4979 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4980
4981 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4982 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4983 mbxStatus <status>, ring <num> */
4984
4985 lpfc_printf_log(phba, KERN_ERR,
4986 LOG_SLI | LOG_VPORT,
4987 "1805 Adapter failed to init. "
4988 "Data: x%x x%x x%x\n",
4989 pmbox->mbxCommand,
4990 pmbox->mbxStatus, hbqno);
4991
4992 phba->link_state = LPFC_HBA_ERROR;
4993 mempool_free(pmb, phba->mbox_mem_pool);
4994 return -ENXIO;
4995 }
4996 }
4997 phba->hbq_count = hbq_count;
4998
4999 mempool_free(pmb, phba->mbox_mem_pool);
5000
5001 /* Initially populate or replenish the HBQs */
5002 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5003 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5004 return 0;
5005 }
5006
5007 /**
5008 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5009 * @phba: Pointer to HBA context object.
5010 *
5011 * This function is called during the SLI initialization to configure
5012 * all the HBQs and post buffers to the HBQ. The caller is not
5013 * required to hold any locks. This function will return zero if successful
5014 * else it will return negative error code.
5015 **/
5016 static int
lpfc_sli4_rb_setup(struct lpfc_hba * phba)5017 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5018 {
5019 phba->hbq_in_use = 1;
5020 /**
5021 * Specific case when the MDS diagnostics is enabled and supported.
5022 * The receive buffer count is truncated to manage the incoming
5023 * traffic.
5024 **/
5025 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5026 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5027 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5028 else
5029 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5030 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5031 phba->hbq_count = 1;
5032 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5033 /* Initially populate or replenish the HBQs */
5034 return 0;
5035 }
5036
5037 /**
5038 * lpfc_sli_config_port - Issue config port mailbox command
5039 * @phba: Pointer to HBA context object.
5040 * @sli_mode: sli mode - 2/3
5041 *
5042 * This function is called by the sli initialization code path
5043 * to issue config_port mailbox command. This function restarts the
5044 * HBA firmware and issues a config_port mailbox command to configure
5045 * the SLI interface in the sli mode specified by sli_mode
5046 * variable. The caller is not required to hold any locks.
5047 * The function returns 0 if successful, else returns negative error
5048 * code.
5049 **/
5050 int
lpfc_sli_config_port(struct lpfc_hba * phba,int sli_mode)5051 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5052 {
5053 LPFC_MBOXQ_t *pmb;
5054 uint32_t resetcount = 0, rc = 0, done = 0;
5055
5056 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5057 if (!pmb) {
5058 phba->link_state = LPFC_HBA_ERROR;
5059 return -ENOMEM;
5060 }
5061
5062 phba->sli_rev = sli_mode;
5063 while (resetcount < 2 && !done) {
5064 spin_lock_irq(&phba->hbalock);
5065 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5066 spin_unlock_irq(&phba->hbalock);
5067 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5068 lpfc_sli_brdrestart(phba);
5069 rc = lpfc_sli_chipset_init(phba);
5070 if (rc)
5071 break;
5072
5073 spin_lock_irq(&phba->hbalock);
5074 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5075 spin_unlock_irq(&phba->hbalock);
5076 resetcount++;
5077
5078 /* Call pre CONFIG_PORT mailbox command initialization. A
5079 * value of 0 means the call was successful. Any other
5080 * nonzero value is a failure, but if ERESTART is returned,
5081 * the driver may reset the HBA and try again.
5082 */
5083 rc = lpfc_config_port_prep(phba);
5084 if (rc == -ERESTART) {
5085 phba->link_state = LPFC_LINK_UNKNOWN;
5086 continue;
5087 } else if (rc)
5088 break;
5089
5090 phba->link_state = LPFC_INIT_MBX_CMDS;
5091 lpfc_config_port(phba, pmb);
5092 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5093 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5094 LPFC_SLI3_HBQ_ENABLED |
5095 LPFC_SLI3_CRP_ENABLED |
5096 LPFC_SLI3_DSS_ENABLED);
5097 if (rc != MBX_SUCCESS) {
5098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5099 "0442 Adapter failed to init, mbxCmd x%x "
5100 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5101 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5102 spin_lock_irq(&phba->hbalock);
5103 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5104 spin_unlock_irq(&phba->hbalock);
5105 rc = -ENXIO;
5106 } else {
5107 /* Allow asynchronous mailbox command to go through */
5108 spin_lock_irq(&phba->hbalock);
5109 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5110 spin_unlock_irq(&phba->hbalock);
5111 done = 1;
5112
5113 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5114 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5116 "3110 Port did not grant ASABT\n");
5117 }
5118 }
5119 if (!done) {
5120 rc = -EINVAL;
5121 goto do_prep_failed;
5122 }
5123 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5124 if (!pmb->u.mb.un.varCfgPort.cMA) {
5125 rc = -ENXIO;
5126 goto do_prep_failed;
5127 }
5128 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5129 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5130 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5131 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5132 phba->max_vpi : phba->max_vports;
5133
5134 } else
5135 phba->max_vpi = 0;
5136 if (pmb->u.mb.un.varCfgPort.gerbm)
5137 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5138 if (pmb->u.mb.un.varCfgPort.gcrp)
5139 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5140
5141 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5142 phba->port_gp = phba->mbox->us.s3_pgp.port;
5143
5144 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5145 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5146 phba->cfg_enable_bg = 0;
5147 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5149 "0443 Adapter did not grant "
5150 "BlockGuard\n");
5151 }
5152 }
5153 } else {
5154 phba->hbq_get = NULL;
5155 phba->port_gp = phba->mbox->us.s2.port;
5156 phba->max_vpi = 0;
5157 }
5158 do_prep_failed:
5159 mempool_free(pmb, phba->mbox_mem_pool);
5160 return rc;
5161 }
5162
5163
5164 /**
5165 * lpfc_sli_hba_setup - SLI initialization function
5166 * @phba: Pointer to HBA context object.
5167 *
5168 * This function is the main SLI initialization function. This function
5169 * is called by the HBA initialization code, HBA reset code and HBA
5170 * error attention handler code. Caller is not required to hold any
5171 * locks. This function issues config_port mailbox command to configure
5172 * the SLI, setup iocb rings and HBQ rings. In the end the function
5173 * calls the config_port_post function to issue init_link mailbox
5174 * command and to start the discovery. The function will return zero
5175 * if successful, else it will return negative error code.
5176 **/
5177 int
lpfc_sli_hba_setup(struct lpfc_hba * phba)5178 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5179 {
5180 uint32_t rc;
5181 int mode = 3, i;
5182 int longs;
5183
5184 switch (phba->cfg_sli_mode) {
5185 case 2:
5186 if (phba->cfg_enable_npiv) {
5187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5188 "1824 NPIV enabled: Override sli_mode "
5189 "parameter (%d) to auto (0).\n",
5190 phba->cfg_sli_mode);
5191 break;
5192 }
5193 mode = 2;
5194 break;
5195 case 0:
5196 case 3:
5197 break;
5198 default:
5199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5200 "1819 Unrecognized sli_mode parameter: %d.\n",
5201 phba->cfg_sli_mode);
5202
5203 break;
5204 }
5205 phba->fcp_embed_io = 0; /* SLI4 FC support only */
5206
5207 rc = lpfc_sli_config_port(phba, mode);
5208
5209 if (rc && phba->cfg_sli_mode == 3)
5210 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5211 "1820 Unable to select SLI-3. "
5212 "Not supported by adapter.\n");
5213 if (rc && mode != 2)
5214 rc = lpfc_sli_config_port(phba, 2);
5215 else if (rc && mode == 2)
5216 rc = lpfc_sli_config_port(phba, 3);
5217 if (rc)
5218 goto lpfc_sli_hba_setup_error;
5219
5220 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5221 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5222 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5223 if (!rc) {
5224 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5225 "2709 This device supports "
5226 "Advanced Error Reporting (AER)\n");
5227 spin_lock_irq(&phba->hbalock);
5228 phba->hba_flag |= HBA_AER_ENABLED;
5229 spin_unlock_irq(&phba->hbalock);
5230 } else {
5231 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5232 "2708 This device does not support "
5233 "Advanced Error Reporting (AER): %d\n",
5234 rc);
5235 phba->cfg_aer_support = 0;
5236 }
5237 }
5238
5239 if (phba->sli_rev == 3) {
5240 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5241 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5242 } else {
5243 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5244 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5245 phba->sli3_options = 0;
5246 }
5247
5248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5249 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5250 phba->sli_rev, phba->max_vpi);
5251 rc = lpfc_sli_ring_map(phba);
5252
5253 if (rc)
5254 goto lpfc_sli_hba_setup_error;
5255
5256 /* Initialize VPIs. */
5257 if (phba->sli_rev == LPFC_SLI_REV3) {
5258 /*
5259 * The VPI bitmask and physical ID array are allocated
5260 * and initialized once only - at driver load. A port
5261 * reset doesn't need to reinitialize this memory.
5262 */
5263 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5264 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5265 phba->vpi_bmask = kcalloc(longs,
5266 sizeof(unsigned long),
5267 GFP_KERNEL);
5268 if (!phba->vpi_bmask) {
5269 rc = -ENOMEM;
5270 goto lpfc_sli_hba_setup_error;
5271 }
5272
5273 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5274 sizeof(uint16_t),
5275 GFP_KERNEL);
5276 if (!phba->vpi_ids) {
5277 kfree(phba->vpi_bmask);
5278 rc = -ENOMEM;
5279 goto lpfc_sli_hba_setup_error;
5280 }
5281 for (i = 0; i < phba->max_vpi; i++)
5282 phba->vpi_ids[i] = i;
5283 }
5284 }
5285
5286 /* Init HBQs */
5287 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5288 rc = lpfc_sli_hbq_setup(phba);
5289 if (rc)
5290 goto lpfc_sli_hba_setup_error;
5291 }
5292 spin_lock_irq(&phba->hbalock);
5293 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5294 spin_unlock_irq(&phba->hbalock);
5295
5296 rc = lpfc_config_port_post(phba);
5297 if (rc)
5298 goto lpfc_sli_hba_setup_error;
5299
5300 return rc;
5301
5302 lpfc_sli_hba_setup_error:
5303 phba->link_state = LPFC_HBA_ERROR;
5304 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5305 "0445 Firmware initialization failed\n");
5306 return rc;
5307 }
5308
5309 /**
5310 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5311 * @phba: Pointer to HBA context object.
5312 *
5313 * This function issue a dump mailbox command to read config region
5314 * 23 and parse the records in the region and populate driver
5315 * data structure.
5316 **/
5317 static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba * phba)5318 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5319 {
5320 LPFC_MBOXQ_t *mboxq;
5321 struct lpfc_dmabuf *mp;
5322 struct lpfc_mqe *mqe;
5323 uint32_t data_length;
5324 int rc;
5325
5326 /* Program the default value of vlan_id and fc_map */
5327 phba->valid_vlan = 0;
5328 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5329 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5330 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5331
5332 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5333 if (!mboxq)
5334 return -ENOMEM;
5335
5336 mqe = &mboxq->u.mqe;
5337 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5338 rc = -ENOMEM;
5339 goto out_free_mboxq;
5340 }
5341
5342 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5343 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5344
5345 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5346 "(%d):2571 Mailbox cmd x%x Status x%x "
5347 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5348 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5349 "CQ: x%x x%x x%x x%x\n",
5350 mboxq->vport ? mboxq->vport->vpi : 0,
5351 bf_get(lpfc_mqe_command, mqe),
5352 bf_get(lpfc_mqe_status, mqe),
5353 mqe->un.mb_words[0], mqe->un.mb_words[1],
5354 mqe->un.mb_words[2], mqe->un.mb_words[3],
5355 mqe->un.mb_words[4], mqe->un.mb_words[5],
5356 mqe->un.mb_words[6], mqe->un.mb_words[7],
5357 mqe->un.mb_words[8], mqe->un.mb_words[9],
5358 mqe->un.mb_words[10], mqe->un.mb_words[11],
5359 mqe->un.mb_words[12], mqe->un.mb_words[13],
5360 mqe->un.mb_words[14], mqe->un.mb_words[15],
5361 mqe->un.mb_words[16], mqe->un.mb_words[50],
5362 mboxq->mcqe.word0,
5363 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5364 mboxq->mcqe.trailer);
5365
5366 if (rc) {
5367 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5368 kfree(mp);
5369 rc = -EIO;
5370 goto out_free_mboxq;
5371 }
5372 data_length = mqe->un.mb_words[5];
5373 if (data_length > DMP_RGN23_SIZE) {
5374 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5375 kfree(mp);
5376 rc = -EIO;
5377 goto out_free_mboxq;
5378 }
5379
5380 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5381 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5382 kfree(mp);
5383 rc = 0;
5384
5385 out_free_mboxq:
5386 mempool_free(mboxq, phba->mbox_mem_pool);
5387 return rc;
5388 }
5389
5390 /**
5391 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5392 * @phba: pointer to lpfc hba data structure.
5393 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5394 * @vpd: pointer to the memory to hold resulting port vpd data.
5395 * @vpd_size: On input, the number of bytes allocated to @vpd.
5396 * On output, the number of data bytes in @vpd.
5397 *
5398 * This routine executes a READ_REV SLI4 mailbox command. In
5399 * addition, this routine gets the port vpd data.
5400 *
5401 * Return codes
5402 * 0 - successful
5403 * -ENOMEM - could not allocated memory.
5404 **/
5405 static int
lpfc_sli4_read_rev(struct lpfc_hba * phba,LPFC_MBOXQ_t * mboxq,uint8_t * vpd,uint32_t * vpd_size)5406 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5407 uint8_t *vpd, uint32_t *vpd_size)
5408 {
5409 int rc = 0;
5410 uint32_t dma_size;
5411 struct lpfc_dmabuf *dmabuf;
5412 struct lpfc_mqe *mqe;
5413
5414 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5415 if (!dmabuf)
5416 return -ENOMEM;
5417
5418 /*
5419 * Get a DMA buffer for the vpd data resulting from the READ_REV
5420 * mailbox command.
5421 */
5422 dma_size = *vpd_size;
5423 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5424 &dmabuf->phys, GFP_KERNEL);
5425 if (!dmabuf->virt) {
5426 kfree(dmabuf);
5427 return -ENOMEM;
5428 }
5429
5430 /*
5431 * The SLI4 implementation of READ_REV conflicts at word1,
5432 * bits 31:16 and SLI4 adds vpd functionality not present
5433 * in SLI3. This code corrects the conflicts.
5434 */
5435 lpfc_read_rev(phba, mboxq);
5436 mqe = &mboxq->u.mqe;
5437 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5438 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5439 mqe->un.read_rev.word1 &= 0x0000FFFF;
5440 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5441 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5442
5443 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5444 if (rc) {
5445 dma_free_coherent(&phba->pcidev->dev, dma_size,
5446 dmabuf->virt, dmabuf->phys);
5447 kfree(dmabuf);
5448 return -EIO;
5449 }
5450
5451 /*
5452 * The available vpd length cannot be bigger than the
5453 * DMA buffer passed to the port. Catch the less than
5454 * case and update the caller's size.
5455 */
5456 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5457 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5458
5459 memcpy(vpd, dmabuf->virt, *vpd_size);
5460
5461 dma_free_coherent(&phba->pcidev->dev, dma_size,
5462 dmabuf->virt, dmabuf->phys);
5463 kfree(dmabuf);
5464 return 0;
5465 }
5466
5467 /**
5468 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5469 * @phba: pointer to lpfc hba data structure.
5470 *
5471 * This routine retrieves SLI4 device physical port name this PCI function
5472 * is attached to.
5473 *
5474 * Return codes
5475 * 0 - successful
5476 * otherwise - failed to retrieve controller attributes
5477 **/
5478 static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba * phba)5479 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5480 {
5481 LPFC_MBOXQ_t *mboxq;
5482 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5483 struct lpfc_controller_attribute *cntl_attr;
5484 void *virtaddr = NULL;
5485 uint32_t alloclen, reqlen;
5486 uint32_t shdr_status, shdr_add_status;
5487 union lpfc_sli4_cfg_shdr *shdr;
5488 int rc;
5489
5490 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5491 if (!mboxq)
5492 return -ENOMEM;
5493
5494 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5495 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5496 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5497 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5498 LPFC_SLI4_MBX_NEMBED);
5499
5500 if (alloclen < reqlen) {
5501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5502 "3084 Allocated DMA memory size (%d) is "
5503 "less than the requested DMA memory size "
5504 "(%d)\n", alloclen, reqlen);
5505 rc = -ENOMEM;
5506 goto out_free_mboxq;
5507 }
5508 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5509 virtaddr = mboxq->sge_array->addr[0];
5510 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5511 shdr = &mbx_cntl_attr->cfg_shdr;
5512 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5513 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5514 if (shdr_status || shdr_add_status || rc) {
5515 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5516 "3085 Mailbox x%x (x%x/x%x) failed, "
5517 "rc:x%x, status:x%x, add_status:x%x\n",
5518 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5519 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5520 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5521 rc, shdr_status, shdr_add_status);
5522 rc = -ENXIO;
5523 goto out_free_mboxq;
5524 }
5525
5526 cntl_attr = &mbx_cntl_attr->cntl_attr;
5527 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5528 phba->sli4_hba.lnk_info.lnk_tp =
5529 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5530 phba->sli4_hba.lnk_info.lnk_no =
5531 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5532
5533 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5534 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5535 sizeof(phba->BIOSVersion));
5536
5537 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5538 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5539 phba->sli4_hba.lnk_info.lnk_tp,
5540 phba->sli4_hba.lnk_info.lnk_no,
5541 phba->BIOSVersion);
5542 out_free_mboxq:
5543 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5544 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5545 else
5546 mempool_free(mboxq, phba->mbox_mem_pool);
5547 return rc;
5548 }
5549
5550 /**
5551 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5552 * @phba: pointer to lpfc hba data structure.
5553 *
5554 * This routine retrieves SLI4 device physical port name this PCI function
5555 * is attached to.
5556 *
5557 * Return codes
5558 * 0 - successful
5559 * otherwise - failed to retrieve physical port name
5560 **/
5561 static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba * phba)5562 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5563 {
5564 LPFC_MBOXQ_t *mboxq;
5565 struct lpfc_mbx_get_port_name *get_port_name;
5566 uint32_t shdr_status, shdr_add_status;
5567 union lpfc_sli4_cfg_shdr *shdr;
5568 char cport_name = 0;
5569 int rc;
5570
5571 /* We assume nothing at this point */
5572 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5573 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5574
5575 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5576 if (!mboxq)
5577 return -ENOMEM;
5578 /* obtain link type and link number via READ_CONFIG */
5579 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5580 lpfc_sli4_read_config(phba);
5581 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5582 goto retrieve_ppname;
5583
5584 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5585 rc = lpfc_sli4_get_ctl_attr(phba);
5586 if (rc)
5587 goto out_free_mboxq;
5588
5589 retrieve_ppname:
5590 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5591 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5592 sizeof(struct lpfc_mbx_get_port_name) -
5593 sizeof(struct lpfc_sli4_cfg_mhdr),
5594 LPFC_SLI4_MBX_EMBED);
5595 get_port_name = &mboxq->u.mqe.un.get_port_name;
5596 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5597 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5598 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5599 phba->sli4_hba.lnk_info.lnk_tp);
5600 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5601 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5602 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5603 if (shdr_status || shdr_add_status || rc) {
5604 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5605 "3087 Mailbox x%x (x%x/x%x) failed: "
5606 "rc:x%x, status:x%x, add_status:x%x\n",
5607 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5608 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5609 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5610 rc, shdr_status, shdr_add_status);
5611 rc = -ENXIO;
5612 goto out_free_mboxq;
5613 }
5614 switch (phba->sli4_hba.lnk_info.lnk_no) {
5615 case LPFC_LINK_NUMBER_0:
5616 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5617 &get_port_name->u.response);
5618 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5619 break;
5620 case LPFC_LINK_NUMBER_1:
5621 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5622 &get_port_name->u.response);
5623 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5624 break;
5625 case LPFC_LINK_NUMBER_2:
5626 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5627 &get_port_name->u.response);
5628 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5629 break;
5630 case LPFC_LINK_NUMBER_3:
5631 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5632 &get_port_name->u.response);
5633 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5634 break;
5635 default:
5636 break;
5637 }
5638
5639 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5640 phba->Port[0] = cport_name;
5641 phba->Port[1] = '\0';
5642 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5643 "3091 SLI get port name: %s\n", phba->Port);
5644 }
5645
5646 out_free_mboxq:
5647 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5648 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5649 else
5650 mempool_free(mboxq, phba->mbox_mem_pool);
5651 return rc;
5652 }
5653
5654 /**
5655 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5656 * @phba: pointer to lpfc hba data structure.
5657 *
5658 * This routine is called to explicitly arm the SLI4 device's completion and
5659 * event queues
5660 **/
5661 static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba * phba)5662 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5663 {
5664 int qidx;
5665 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5666 struct lpfc_sli4_hdw_queue *qp;
5667 struct lpfc_queue *eq;
5668
5669 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5670 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5671 if (sli4_hba->nvmels_cq)
5672 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5673 LPFC_QUEUE_REARM);
5674
5675 if (sli4_hba->hdwq) {
5676 /* Loop thru all Hardware Queues */
5677 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5678 qp = &sli4_hba->hdwq[qidx];
5679 /* ARM the corresponding CQ */
5680 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5681 LPFC_QUEUE_REARM);
5682 }
5683
5684 /* Loop thru all IRQ vectors */
5685 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5686 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5687 /* ARM the corresponding EQ */
5688 sli4_hba->sli4_write_eq_db(phba, eq,
5689 0, LPFC_QUEUE_REARM);
5690 }
5691 }
5692
5693 if (phba->nvmet_support) {
5694 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5695 sli4_hba->sli4_write_cq_db(phba,
5696 sli4_hba->nvmet_cqset[qidx], 0,
5697 LPFC_QUEUE_REARM);
5698 }
5699 }
5700 }
5701
5702 /**
5703 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5704 * @phba: Pointer to HBA context object.
5705 * @type: The resource extent type.
5706 * @extnt_count: buffer to hold port available extent count.
5707 * @extnt_size: buffer to hold element count per extent.
5708 *
5709 * This function calls the port and retrievs the number of available
5710 * extents and their size for a particular extent type.
5711 *
5712 * Returns: 0 if successful. Nonzero otherwise.
5713 **/
5714 int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba * phba,uint16_t type,uint16_t * extnt_count,uint16_t * extnt_size)5715 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5716 uint16_t *extnt_count, uint16_t *extnt_size)
5717 {
5718 int rc = 0;
5719 uint32_t length;
5720 uint32_t mbox_tmo;
5721 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5722 LPFC_MBOXQ_t *mbox;
5723
5724 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5725 if (!mbox)
5726 return -ENOMEM;
5727
5728 /* Find out how many extents are available for this resource type */
5729 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5730 sizeof(struct lpfc_sli4_cfg_mhdr));
5731 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5732 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5733 length, LPFC_SLI4_MBX_EMBED);
5734
5735 /* Send an extents count of 0 - the GET doesn't use it. */
5736 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5737 LPFC_SLI4_MBX_EMBED);
5738 if (unlikely(rc)) {
5739 rc = -EIO;
5740 goto err_exit;
5741 }
5742
5743 if (!phba->sli4_hba.intr_enable)
5744 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5745 else {
5746 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5747 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5748 }
5749 if (unlikely(rc)) {
5750 rc = -EIO;
5751 goto err_exit;
5752 }
5753
5754 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5755 if (bf_get(lpfc_mbox_hdr_status,
5756 &rsrc_info->header.cfg_shdr.response)) {
5757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5758 "2930 Failed to get resource extents "
5759 "Status 0x%x Add'l Status 0x%x\n",
5760 bf_get(lpfc_mbox_hdr_status,
5761 &rsrc_info->header.cfg_shdr.response),
5762 bf_get(lpfc_mbox_hdr_add_status,
5763 &rsrc_info->header.cfg_shdr.response));
5764 rc = -EIO;
5765 goto err_exit;
5766 }
5767
5768 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5769 &rsrc_info->u.rsp);
5770 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5771 &rsrc_info->u.rsp);
5772
5773 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5774 "3162 Retrieved extents type-%d from port: count:%d, "
5775 "size:%d\n", type, *extnt_count, *extnt_size);
5776
5777 err_exit:
5778 mempool_free(mbox, phba->mbox_mem_pool);
5779 return rc;
5780 }
5781
5782 /**
5783 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5784 * @phba: Pointer to HBA context object.
5785 * @type: The extent type to check.
5786 *
5787 * This function reads the current available extents from the port and checks
5788 * if the extent count or extent size has changed since the last access.
5789 * Callers use this routine post port reset to understand if there is a
5790 * extent reprovisioning requirement.
5791 *
5792 * Returns:
5793 * -Error: error indicates problem.
5794 * 1: Extent count or size has changed.
5795 * 0: No changes.
5796 **/
5797 static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba * phba,uint16_t type)5798 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5799 {
5800 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5801 uint16_t size_diff, rsrc_ext_size;
5802 int rc = 0;
5803 struct lpfc_rsrc_blks *rsrc_entry;
5804 struct list_head *rsrc_blk_list = NULL;
5805
5806 size_diff = 0;
5807 curr_ext_cnt = 0;
5808 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5809 &rsrc_ext_cnt,
5810 &rsrc_ext_size);
5811 if (unlikely(rc))
5812 return -EIO;
5813
5814 switch (type) {
5815 case LPFC_RSC_TYPE_FCOE_RPI:
5816 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5817 break;
5818 case LPFC_RSC_TYPE_FCOE_VPI:
5819 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5820 break;
5821 case LPFC_RSC_TYPE_FCOE_XRI:
5822 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5823 break;
5824 case LPFC_RSC_TYPE_FCOE_VFI:
5825 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5826 break;
5827 default:
5828 break;
5829 }
5830
5831 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5832 curr_ext_cnt++;
5833 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5834 size_diff++;
5835 }
5836
5837 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5838 rc = 1;
5839
5840 return rc;
5841 }
5842
5843 /**
5844 * lpfc_sli4_cfg_post_extnts -
5845 * @phba: Pointer to HBA context object.
5846 * @extnt_cnt: number of available extents.
5847 * @type: the extent type (rpi, xri, vfi, vpi).
5848 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5849 * @mbox: pointer to the caller's allocated mailbox structure.
5850 *
5851 * This function executes the extents allocation request. It also
5852 * takes care of the amount of memory needed to allocate or get the
5853 * allocated extents. It is the caller's responsibility to evaluate
5854 * the response.
5855 *
5856 * Returns:
5857 * -Error: Error value describes the condition found.
5858 * 0: if successful
5859 **/
5860 static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba * phba,uint16_t extnt_cnt,uint16_t type,bool * emb,LPFC_MBOXQ_t * mbox)5861 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5862 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5863 {
5864 int rc = 0;
5865 uint32_t req_len;
5866 uint32_t emb_len;
5867 uint32_t alloc_len, mbox_tmo;
5868
5869 /* Calculate the total requested length of the dma memory */
5870 req_len = extnt_cnt * sizeof(uint16_t);
5871
5872 /*
5873 * Calculate the size of an embedded mailbox. The uint32_t
5874 * accounts for extents-specific word.
5875 */
5876 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5877 sizeof(uint32_t);
5878
5879 /*
5880 * Presume the allocation and response will fit into an embedded
5881 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5882 */
5883 *emb = LPFC_SLI4_MBX_EMBED;
5884 if (req_len > emb_len) {
5885 req_len = extnt_cnt * sizeof(uint16_t) +
5886 sizeof(union lpfc_sli4_cfg_shdr) +
5887 sizeof(uint32_t);
5888 *emb = LPFC_SLI4_MBX_NEMBED;
5889 }
5890
5891 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5892 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5893 req_len, *emb);
5894 if (alloc_len < req_len) {
5895 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5896 "2982 Allocated DMA memory size (x%x) is "
5897 "less than the requested DMA memory "
5898 "size (x%x)\n", alloc_len, req_len);
5899 return -ENOMEM;
5900 }
5901 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5902 if (unlikely(rc))
5903 return -EIO;
5904
5905 if (!phba->sli4_hba.intr_enable)
5906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5907 else {
5908 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5909 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5910 }
5911
5912 if (unlikely(rc))
5913 rc = -EIO;
5914 return rc;
5915 }
5916
5917 /**
5918 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5919 * @phba: Pointer to HBA context object.
5920 * @type: The resource extent type to allocate.
5921 *
5922 * This function allocates the number of elements for the specified
5923 * resource type.
5924 **/
5925 static int
lpfc_sli4_alloc_extent(struct lpfc_hba * phba,uint16_t type)5926 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5927 {
5928 bool emb = false;
5929 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5930 uint16_t rsrc_id, rsrc_start, j, k;
5931 uint16_t *ids;
5932 int i, rc;
5933 unsigned long longs;
5934 unsigned long *bmask;
5935 struct lpfc_rsrc_blks *rsrc_blks;
5936 LPFC_MBOXQ_t *mbox;
5937 uint32_t length;
5938 struct lpfc_id_range *id_array = NULL;
5939 void *virtaddr = NULL;
5940 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5941 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5942 struct list_head *ext_blk_list;
5943
5944 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5945 &rsrc_cnt,
5946 &rsrc_size);
5947 if (unlikely(rc))
5948 return -EIO;
5949
5950 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5952 "3009 No available Resource Extents "
5953 "for resource type 0x%x: Count: 0x%x, "
5954 "Size 0x%x\n", type, rsrc_cnt,
5955 rsrc_size);
5956 return -ENOMEM;
5957 }
5958
5959 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5960 "2903 Post resource extents type-0x%x: "
5961 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5962
5963 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5964 if (!mbox)
5965 return -ENOMEM;
5966
5967 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5968 if (unlikely(rc)) {
5969 rc = -EIO;
5970 goto err_exit;
5971 }
5972
5973 /*
5974 * Figure out where the response is located. Then get local pointers
5975 * to the response data. The port does not guarantee to respond to
5976 * all extents counts request so update the local variable with the
5977 * allocated count from the port.
5978 */
5979 if (emb == LPFC_SLI4_MBX_EMBED) {
5980 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5981 id_array = &rsrc_ext->u.rsp.id[0];
5982 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5983 } else {
5984 virtaddr = mbox->sge_array->addr[0];
5985 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5986 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5987 id_array = &n_rsrc->id;
5988 }
5989
5990 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5991 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5992
5993 /*
5994 * Based on the resource size and count, correct the base and max
5995 * resource values.
5996 */
5997 length = sizeof(struct lpfc_rsrc_blks);
5998 switch (type) {
5999 case LPFC_RSC_TYPE_FCOE_RPI:
6000 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6001 sizeof(unsigned long),
6002 GFP_KERNEL);
6003 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6004 rc = -ENOMEM;
6005 goto err_exit;
6006 }
6007 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6008 sizeof(uint16_t),
6009 GFP_KERNEL);
6010 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6011 kfree(phba->sli4_hba.rpi_bmask);
6012 rc = -ENOMEM;
6013 goto err_exit;
6014 }
6015
6016 /*
6017 * The next_rpi was initialized with the maximum available
6018 * count but the port may allocate a smaller number. Catch
6019 * that case and update the next_rpi.
6020 */
6021 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6022
6023 /* Initialize local ptrs for common extent processing later. */
6024 bmask = phba->sli4_hba.rpi_bmask;
6025 ids = phba->sli4_hba.rpi_ids;
6026 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6027 break;
6028 case LPFC_RSC_TYPE_FCOE_VPI:
6029 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6030 GFP_KERNEL);
6031 if (unlikely(!phba->vpi_bmask)) {
6032 rc = -ENOMEM;
6033 goto err_exit;
6034 }
6035 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6036 GFP_KERNEL);
6037 if (unlikely(!phba->vpi_ids)) {
6038 kfree(phba->vpi_bmask);
6039 rc = -ENOMEM;
6040 goto err_exit;
6041 }
6042
6043 /* Initialize local ptrs for common extent processing later. */
6044 bmask = phba->vpi_bmask;
6045 ids = phba->vpi_ids;
6046 ext_blk_list = &phba->lpfc_vpi_blk_list;
6047 break;
6048 case LPFC_RSC_TYPE_FCOE_XRI:
6049 phba->sli4_hba.xri_bmask = kcalloc(longs,
6050 sizeof(unsigned long),
6051 GFP_KERNEL);
6052 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6053 rc = -ENOMEM;
6054 goto err_exit;
6055 }
6056 phba->sli4_hba.max_cfg_param.xri_used = 0;
6057 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6058 sizeof(uint16_t),
6059 GFP_KERNEL);
6060 if (unlikely(!phba->sli4_hba.xri_ids)) {
6061 kfree(phba->sli4_hba.xri_bmask);
6062 rc = -ENOMEM;
6063 goto err_exit;
6064 }
6065
6066 /* Initialize local ptrs for common extent processing later. */
6067 bmask = phba->sli4_hba.xri_bmask;
6068 ids = phba->sli4_hba.xri_ids;
6069 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6070 break;
6071 case LPFC_RSC_TYPE_FCOE_VFI:
6072 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6073 sizeof(unsigned long),
6074 GFP_KERNEL);
6075 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6076 rc = -ENOMEM;
6077 goto err_exit;
6078 }
6079 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6080 sizeof(uint16_t),
6081 GFP_KERNEL);
6082 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6083 kfree(phba->sli4_hba.vfi_bmask);
6084 rc = -ENOMEM;
6085 goto err_exit;
6086 }
6087
6088 /* Initialize local ptrs for common extent processing later. */
6089 bmask = phba->sli4_hba.vfi_bmask;
6090 ids = phba->sli4_hba.vfi_ids;
6091 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6092 break;
6093 default:
6094 /* Unsupported Opcode. Fail call. */
6095 id_array = NULL;
6096 bmask = NULL;
6097 ids = NULL;
6098 ext_blk_list = NULL;
6099 goto err_exit;
6100 }
6101
6102 /*
6103 * Complete initializing the extent configuration with the
6104 * allocated ids assigned to this function. The bitmask serves
6105 * as an index into the array and manages the available ids. The
6106 * array just stores the ids communicated to the port via the wqes.
6107 */
6108 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6109 if ((i % 2) == 0)
6110 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6111 &id_array[k]);
6112 else
6113 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6114 &id_array[k]);
6115
6116 rsrc_blks = kzalloc(length, GFP_KERNEL);
6117 if (unlikely(!rsrc_blks)) {
6118 rc = -ENOMEM;
6119 kfree(bmask);
6120 kfree(ids);
6121 goto err_exit;
6122 }
6123 rsrc_blks->rsrc_start = rsrc_id;
6124 rsrc_blks->rsrc_size = rsrc_size;
6125 list_add_tail(&rsrc_blks->list, ext_blk_list);
6126 rsrc_start = rsrc_id;
6127 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6128 phba->sli4_hba.io_xri_start = rsrc_start +
6129 lpfc_sli4_get_iocb_cnt(phba);
6130 }
6131
6132 while (rsrc_id < (rsrc_start + rsrc_size)) {
6133 ids[j] = rsrc_id;
6134 rsrc_id++;
6135 j++;
6136 }
6137 /* Entire word processed. Get next word.*/
6138 if ((i % 2) == 1)
6139 k++;
6140 }
6141 err_exit:
6142 lpfc_sli4_mbox_cmd_free(phba, mbox);
6143 return rc;
6144 }
6145
6146
6147
6148 /**
6149 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6150 * @phba: Pointer to HBA context object.
6151 * @type: the extent's type.
6152 *
6153 * This function deallocates all extents of a particular resource type.
6154 * SLI4 does not allow for deallocating a particular extent range. It
6155 * is the caller's responsibility to release all kernel memory resources.
6156 **/
6157 static int
lpfc_sli4_dealloc_extent(struct lpfc_hba * phba,uint16_t type)6158 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6159 {
6160 int rc;
6161 uint32_t length, mbox_tmo = 0;
6162 LPFC_MBOXQ_t *mbox;
6163 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6164 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6165
6166 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6167 if (!mbox)
6168 return -ENOMEM;
6169
6170 /*
6171 * This function sends an embedded mailbox because it only sends the
6172 * the resource type. All extents of this type are released by the
6173 * port.
6174 */
6175 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6176 sizeof(struct lpfc_sli4_cfg_mhdr));
6177 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6178 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6179 length, LPFC_SLI4_MBX_EMBED);
6180
6181 /* Send an extents count of 0 - the dealloc doesn't use it. */
6182 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6183 LPFC_SLI4_MBX_EMBED);
6184 if (unlikely(rc)) {
6185 rc = -EIO;
6186 goto out_free_mbox;
6187 }
6188 if (!phba->sli4_hba.intr_enable)
6189 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6190 else {
6191 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6192 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6193 }
6194 if (unlikely(rc)) {
6195 rc = -EIO;
6196 goto out_free_mbox;
6197 }
6198
6199 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6200 if (bf_get(lpfc_mbox_hdr_status,
6201 &dealloc_rsrc->header.cfg_shdr.response)) {
6202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6203 "2919 Failed to release resource extents "
6204 "for type %d - Status 0x%x Add'l Status 0x%x. "
6205 "Resource memory not released.\n",
6206 type,
6207 bf_get(lpfc_mbox_hdr_status,
6208 &dealloc_rsrc->header.cfg_shdr.response),
6209 bf_get(lpfc_mbox_hdr_add_status,
6210 &dealloc_rsrc->header.cfg_shdr.response));
6211 rc = -EIO;
6212 goto out_free_mbox;
6213 }
6214
6215 /* Release kernel memory resources for the specific type. */
6216 switch (type) {
6217 case LPFC_RSC_TYPE_FCOE_VPI:
6218 kfree(phba->vpi_bmask);
6219 kfree(phba->vpi_ids);
6220 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6221 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6222 &phba->lpfc_vpi_blk_list, list) {
6223 list_del_init(&rsrc_blk->list);
6224 kfree(rsrc_blk);
6225 }
6226 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6227 break;
6228 case LPFC_RSC_TYPE_FCOE_XRI:
6229 kfree(phba->sli4_hba.xri_bmask);
6230 kfree(phba->sli4_hba.xri_ids);
6231 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6232 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6233 list_del_init(&rsrc_blk->list);
6234 kfree(rsrc_blk);
6235 }
6236 break;
6237 case LPFC_RSC_TYPE_FCOE_VFI:
6238 kfree(phba->sli4_hba.vfi_bmask);
6239 kfree(phba->sli4_hba.vfi_ids);
6240 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6241 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6242 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6243 list_del_init(&rsrc_blk->list);
6244 kfree(rsrc_blk);
6245 }
6246 break;
6247 case LPFC_RSC_TYPE_FCOE_RPI:
6248 /* RPI bitmask and physical id array are cleaned up earlier. */
6249 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6250 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6251 list_del_init(&rsrc_blk->list);
6252 kfree(rsrc_blk);
6253 }
6254 break;
6255 default:
6256 break;
6257 }
6258
6259 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6260
6261 out_free_mbox:
6262 mempool_free(mbox, phba->mbox_mem_pool);
6263 return rc;
6264 }
6265
6266 static void
lpfc_set_features(struct lpfc_hba * phba,LPFC_MBOXQ_t * mbox,uint32_t feature)6267 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6268 uint32_t feature)
6269 {
6270 uint32_t len;
6271
6272 len = sizeof(struct lpfc_mbx_set_feature) -
6273 sizeof(struct lpfc_sli4_cfg_mhdr);
6274 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6275 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6276 LPFC_SLI4_MBX_EMBED);
6277
6278 switch (feature) {
6279 case LPFC_SET_UE_RECOVERY:
6280 bf_set(lpfc_mbx_set_feature_UER,
6281 &mbox->u.mqe.un.set_feature, 1);
6282 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6283 mbox->u.mqe.un.set_feature.param_len = 8;
6284 break;
6285 case LPFC_SET_MDS_DIAGS:
6286 bf_set(lpfc_mbx_set_feature_mds,
6287 &mbox->u.mqe.un.set_feature, 1);
6288 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6289 &mbox->u.mqe.un.set_feature, 1);
6290 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6291 mbox->u.mqe.un.set_feature.param_len = 8;
6292 break;
6293 case LPFC_SET_DUAL_DUMP:
6294 bf_set(lpfc_mbx_set_feature_dd,
6295 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6296 bf_set(lpfc_mbx_set_feature_ddquery,
6297 &mbox->u.mqe.un.set_feature, 0);
6298 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6299 mbox->u.mqe.un.set_feature.param_len = 4;
6300 break;
6301 }
6302
6303 return;
6304 }
6305
6306 /**
6307 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6308 * @phba: Pointer to HBA context object.
6309 *
6310 * Disable FW logging into host memory on the adapter. To
6311 * be done before reading logs from the host memory.
6312 **/
6313 void
lpfc_ras_stop_fwlog(struct lpfc_hba * phba)6314 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6315 {
6316 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6317
6318 spin_lock_irq(&phba->hbalock);
6319 ras_fwlog->state = INACTIVE;
6320 spin_unlock_irq(&phba->hbalock);
6321
6322 /* Disable FW logging to host memory */
6323 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6324 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6325
6326 /* Wait 10ms for firmware to stop using DMA buffer */
6327 usleep_range(10 * 1000, 20 * 1000);
6328 }
6329
6330 /**
6331 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6332 * @phba: Pointer to HBA context object.
6333 *
6334 * This function is called to free memory allocated for RAS FW logging
6335 * support in the driver.
6336 **/
6337 void
lpfc_sli4_ras_dma_free(struct lpfc_hba * phba)6338 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6339 {
6340 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6341 struct lpfc_dmabuf *dmabuf, *next;
6342
6343 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6344 list_for_each_entry_safe(dmabuf, next,
6345 &ras_fwlog->fwlog_buff_list,
6346 list) {
6347 list_del(&dmabuf->list);
6348 dma_free_coherent(&phba->pcidev->dev,
6349 LPFC_RAS_MAX_ENTRY_SIZE,
6350 dmabuf->virt, dmabuf->phys);
6351 kfree(dmabuf);
6352 }
6353 }
6354
6355 if (ras_fwlog->lwpd.virt) {
6356 dma_free_coherent(&phba->pcidev->dev,
6357 sizeof(uint32_t) * 2,
6358 ras_fwlog->lwpd.virt,
6359 ras_fwlog->lwpd.phys);
6360 ras_fwlog->lwpd.virt = NULL;
6361 }
6362
6363 spin_lock_irq(&phba->hbalock);
6364 ras_fwlog->state = INACTIVE;
6365 spin_unlock_irq(&phba->hbalock);
6366 }
6367
6368 /**
6369 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
6370 * @phba: Pointer to HBA context object.
6371 * @fwlog_buff_count: Count of buffers to be created.
6372 *
6373 * This routine DMA memory for Log Write Position Data[LPWD] and buffer
6374 * to update FW log is posted to the adapter.
6375 * Buffer count is calculated based on module param ras_fwlog_buffsize
6376 * Size of each buffer posted to FW is 64K.
6377 **/
6378
6379 static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba * phba,uint32_t fwlog_buff_count)6380 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6381 uint32_t fwlog_buff_count)
6382 {
6383 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6384 struct lpfc_dmabuf *dmabuf;
6385 int rc = 0, i = 0;
6386
6387 /* Initialize List */
6388 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6389
6390 /* Allocate memory for the LWPD */
6391 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6392 sizeof(uint32_t) * 2,
6393 &ras_fwlog->lwpd.phys,
6394 GFP_KERNEL);
6395 if (!ras_fwlog->lwpd.virt) {
6396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6397 "6185 LWPD Memory Alloc Failed\n");
6398
6399 return -ENOMEM;
6400 }
6401
6402 ras_fwlog->fw_buffcount = fwlog_buff_count;
6403 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6404 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6405 GFP_KERNEL);
6406 if (!dmabuf) {
6407 rc = -ENOMEM;
6408 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6409 "6186 Memory Alloc failed FW logging");
6410 goto free_mem;
6411 }
6412
6413 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6414 LPFC_RAS_MAX_ENTRY_SIZE,
6415 &dmabuf->phys, GFP_KERNEL);
6416 if (!dmabuf->virt) {
6417 kfree(dmabuf);
6418 rc = -ENOMEM;
6419 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6420 "6187 DMA Alloc Failed FW logging");
6421 goto free_mem;
6422 }
6423 dmabuf->buffer_tag = i;
6424 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6425 }
6426
6427 free_mem:
6428 if (rc)
6429 lpfc_sli4_ras_dma_free(phba);
6430
6431 return rc;
6432 }
6433
6434 /**
6435 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6436 * @phba: pointer to lpfc hba data structure.
6437 * @pmb: pointer to the driver internal queue element for mailbox command.
6438 *
6439 * Completion handler for driver's RAS MBX command to the device.
6440 **/
6441 static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmb)6442 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6443 {
6444 MAILBOX_t *mb;
6445 union lpfc_sli4_cfg_shdr *shdr;
6446 uint32_t shdr_status, shdr_add_status;
6447 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6448
6449 mb = &pmb->u.mb;
6450
6451 shdr = (union lpfc_sli4_cfg_shdr *)
6452 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6453 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6454 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6455
6456 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6457 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6458 "6188 FW LOG mailbox "
6459 "completed with status x%x add_status x%x,"
6460 " mbx status x%x\n",
6461 shdr_status, shdr_add_status, mb->mbxStatus);
6462
6463 ras_fwlog->ras_hwsupport = false;
6464 goto disable_ras;
6465 }
6466
6467 spin_lock_irq(&phba->hbalock);
6468 ras_fwlog->state = ACTIVE;
6469 spin_unlock_irq(&phba->hbalock);
6470 mempool_free(pmb, phba->mbox_mem_pool);
6471
6472 return;
6473
6474 disable_ras:
6475 /* Free RAS DMA memory */
6476 lpfc_sli4_ras_dma_free(phba);
6477 mempool_free(pmb, phba->mbox_mem_pool);
6478 }
6479
6480 /**
6481 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6482 * @phba: pointer to lpfc hba data structure.
6483 * @fwlog_level: Logging verbosity level.
6484 * @fwlog_enable: Enable/Disable logging.
6485 *
6486 * Initialize memory and post mailbox command to enable FW logging in host
6487 * memory.
6488 **/
6489 int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba * phba,uint32_t fwlog_level,uint32_t fwlog_enable)6490 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6491 uint32_t fwlog_level,
6492 uint32_t fwlog_enable)
6493 {
6494 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6495 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6496 struct lpfc_dmabuf *dmabuf;
6497 LPFC_MBOXQ_t *mbox;
6498 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6499 int rc = 0;
6500
6501 spin_lock_irq(&phba->hbalock);
6502 ras_fwlog->state = INACTIVE;
6503 spin_unlock_irq(&phba->hbalock);
6504
6505 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6506 phba->cfg_ras_fwlog_buffsize);
6507 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6508
6509 /*
6510 * If re-enabling FW logging support use earlier allocated
6511 * DMA buffers while posting MBX command.
6512 **/
6513 if (!ras_fwlog->lwpd.virt) {
6514 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6515 if (rc) {
6516 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6517 "6189 FW Log Memory Allocation Failed");
6518 return rc;
6519 }
6520 }
6521
6522 /* Setup Mailbox command */
6523 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6524 if (!mbox) {
6525 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6526 "6190 RAS MBX Alloc Failed");
6527 rc = -ENOMEM;
6528 goto mem_free;
6529 }
6530
6531 ras_fwlog->fw_loglevel = fwlog_level;
6532 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6533 sizeof(struct lpfc_sli4_cfg_mhdr));
6534
6535 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6536 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6537 len, LPFC_SLI4_MBX_EMBED);
6538
6539 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6540 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6541 fwlog_enable);
6542 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6543 ras_fwlog->fw_loglevel);
6544 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6545 ras_fwlog->fw_buffcount);
6546 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6547 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6548
6549 /* Update DMA buffer address */
6550 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6551 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6552
6553 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6554 putPaddrLow(dmabuf->phys);
6555
6556 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6557 putPaddrHigh(dmabuf->phys);
6558 }
6559
6560 /* Update LPWD address */
6561 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6562 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6563
6564 spin_lock_irq(&phba->hbalock);
6565 ras_fwlog->state = REG_INPROGRESS;
6566 spin_unlock_irq(&phba->hbalock);
6567 mbox->vport = phba->pport;
6568 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6569
6570 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6571
6572 if (rc == MBX_NOT_FINISHED) {
6573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6574 "6191 FW-Log Mailbox failed. "
6575 "status %d mbxStatus : x%x", rc,
6576 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6577 mempool_free(mbox, phba->mbox_mem_pool);
6578 rc = -EIO;
6579 goto mem_free;
6580 } else
6581 rc = 0;
6582 mem_free:
6583 if (rc)
6584 lpfc_sli4_ras_dma_free(phba);
6585
6586 return rc;
6587 }
6588
6589 /**
6590 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
6591 * @phba: Pointer to HBA context object.
6592 *
6593 * Check if RAS is supported on the adapter and initialize it.
6594 **/
6595 void
lpfc_sli4_ras_setup(struct lpfc_hba * phba)6596 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6597 {
6598 /* Check RAS FW Log needs to be enabled or not */
6599 if (lpfc_check_fwlog_support(phba))
6600 return;
6601
6602 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6603 LPFC_RAS_ENABLE_LOGGING);
6604 }
6605
6606 /**
6607 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
6608 * @phba: Pointer to HBA context object.
6609 *
6610 * This function allocates all SLI4 resource identifiers.
6611 **/
6612 int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba * phba)6613 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6614 {
6615 int i, rc, error = 0;
6616 uint16_t count, base;
6617 unsigned long longs;
6618
6619 if (!phba->sli4_hba.rpi_hdrs_in_use)
6620 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6621 if (phba->sli4_hba.extents_in_use) {
6622 /*
6623 * The port supports resource extents. The XRI, VPI, VFI, RPI
6624 * resource extent count must be read and allocated before
6625 * provisioning the resource id arrays.
6626 */
6627 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6628 LPFC_IDX_RSRC_RDY) {
6629 /*
6630 * Extent-based resources are set - the driver could
6631 * be in a port reset. Figure out if any corrective
6632 * actions need to be taken.
6633 */
6634 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6635 LPFC_RSC_TYPE_FCOE_VFI);
6636 if (rc != 0)
6637 error++;
6638 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6639 LPFC_RSC_TYPE_FCOE_VPI);
6640 if (rc != 0)
6641 error++;
6642 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6643 LPFC_RSC_TYPE_FCOE_XRI);
6644 if (rc != 0)
6645 error++;
6646 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6647 LPFC_RSC_TYPE_FCOE_RPI);
6648 if (rc != 0)
6649 error++;
6650
6651 /*
6652 * It's possible that the number of resources
6653 * provided to this port instance changed between
6654 * resets. Detect this condition and reallocate
6655 * resources. Otherwise, there is no action.
6656 */
6657 if (error) {
6658 lpfc_printf_log(phba, KERN_INFO,
6659 LOG_MBOX | LOG_INIT,
6660 "2931 Detected extent resource "
6661 "change. Reallocating all "
6662 "extents.\n");
6663 rc = lpfc_sli4_dealloc_extent(phba,
6664 LPFC_RSC_TYPE_FCOE_VFI);
6665 rc = lpfc_sli4_dealloc_extent(phba,
6666 LPFC_RSC_TYPE_FCOE_VPI);
6667 rc = lpfc_sli4_dealloc_extent(phba,
6668 LPFC_RSC_TYPE_FCOE_XRI);
6669 rc = lpfc_sli4_dealloc_extent(phba,
6670 LPFC_RSC_TYPE_FCOE_RPI);
6671 } else
6672 return 0;
6673 }
6674
6675 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6676 if (unlikely(rc))
6677 goto err_exit;
6678
6679 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6680 if (unlikely(rc))
6681 goto err_exit;
6682
6683 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6684 if (unlikely(rc))
6685 goto err_exit;
6686
6687 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6688 if (unlikely(rc))
6689 goto err_exit;
6690 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6691 LPFC_IDX_RSRC_RDY);
6692 return rc;
6693 } else {
6694 /*
6695 * The port does not support resource extents. The XRI, VPI,
6696 * VFI, RPI resource ids were determined from READ_CONFIG.
6697 * Just allocate the bitmasks and provision the resource id
6698 * arrays. If a port reset is active, the resources don't
6699 * need any action - just exit.
6700 */
6701 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6702 LPFC_IDX_RSRC_RDY) {
6703 lpfc_sli4_dealloc_resource_identifiers(phba);
6704 lpfc_sli4_remove_rpis(phba);
6705 }
6706 /* RPIs. */
6707 count = phba->sli4_hba.max_cfg_param.max_rpi;
6708 if (count <= 0) {
6709 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6710 "3279 Invalid provisioning of "
6711 "rpi:%d\n", count);
6712 rc = -EINVAL;
6713 goto err_exit;
6714 }
6715 base = phba->sli4_hba.max_cfg_param.rpi_base;
6716 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6717 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6718 sizeof(unsigned long),
6719 GFP_KERNEL);
6720 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6721 rc = -ENOMEM;
6722 goto err_exit;
6723 }
6724 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6725 GFP_KERNEL);
6726 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6727 rc = -ENOMEM;
6728 goto free_rpi_bmask;
6729 }
6730
6731 for (i = 0; i < count; i++)
6732 phba->sli4_hba.rpi_ids[i] = base + i;
6733
6734 /* VPIs. */
6735 count = phba->sli4_hba.max_cfg_param.max_vpi;
6736 if (count <= 0) {
6737 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6738 "3280 Invalid provisioning of "
6739 "vpi:%d\n", count);
6740 rc = -EINVAL;
6741 goto free_rpi_ids;
6742 }
6743 base = phba->sli4_hba.max_cfg_param.vpi_base;
6744 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6745 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6746 GFP_KERNEL);
6747 if (unlikely(!phba->vpi_bmask)) {
6748 rc = -ENOMEM;
6749 goto free_rpi_ids;
6750 }
6751 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6752 GFP_KERNEL);
6753 if (unlikely(!phba->vpi_ids)) {
6754 rc = -ENOMEM;
6755 goto free_vpi_bmask;
6756 }
6757
6758 for (i = 0; i < count; i++)
6759 phba->vpi_ids[i] = base + i;
6760
6761 /* XRIs. */
6762 count = phba->sli4_hba.max_cfg_param.max_xri;
6763 if (count <= 0) {
6764 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6765 "3281 Invalid provisioning of "
6766 "xri:%d\n", count);
6767 rc = -EINVAL;
6768 goto free_vpi_ids;
6769 }
6770 base = phba->sli4_hba.max_cfg_param.xri_base;
6771 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6772 phba->sli4_hba.xri_bmask = kcalloc(longs,
6773 sizeof(unsigned long),
6774 GFP_KERNEL);
6775 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6776 rc = -ENOMEM;
6777 goto free_vpi_ids;
6778 }
6779 phba->sli4_hba.max_cfg_param.xri_used = 0;
6780 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6781 GFP_KERNEL);
6782 if (unlikely(!phba->sli4_hba.xri_ids)) {
6783 rc = -ENOMEM;
6784 goto free_xri_bmask;
6785 }
6786
6787 for (i = 0; i < count; i++)
6788 phba->sli4_hba.xri_ids[i] = base + i;
6789
6790 /* VFIs. */
6791 count = phba->sli4_hba.max_cfg_param.max_vfi;
6792 if (count <= 0) {
6793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6794 "3282 Invalid provisioning of "
6795 "vfi:%d\n", count);
6796 rc = -EINVAL;
6797 goto free_xri_ids;
6798 }
6799 base = phba->sli4_hba.max_cfg_param.vfi_base;
6800 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6801 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6802 sizeof(unsigned long),
6803 GFP_KERNEL);
6804 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6805 rc = -ENOMEM;
6806 goto free_xri_ids;
6807 }
6808 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6809 GFP_KERNEL);
6810 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6811 rc = -ENOMEM;
6812 goto free_vfi_bmask;
6813 }
6814
6815 for (i = 0; i < count; i++)
6816 phba->sli4_hba.vfi_ids[i] = base + i;
6817
6818 /*
6819 * Mark all resources ready. An HBA reset doesn't need
6820 * to reset the initialization.
6821 */
6822 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6823 LPFC_IDX_RSRC_RDY);
6824 return 0;
6825 }
6826
6827 free_vfi_bmask:
6828 kfree(phba->sli4_hba.vfi_bmask);
6829 phba->sli4_hba.vfi_bmask = NULL;
6830 free_xri_ids:
6831 kfree(phba->sli4_hba.xri_ids);
6832 phba->sli4_hba.xri_ids = NULL;
6833 free_xri_bmask:
6834 kfree(phba->sli4_hba.xri_bmask);
6835 phba->sli4_hba.xri_bmask = NULL;
6836 free_vpi_ids:
6837 kfree(phba->vpi_ids);
6838 phba->vpi_ids = NULL;
6839 free_vpi_bmask:
6840 kfree(phba->vpi_bmask);
6841 phba->vpi_bmask = NULL;
6842 free_rpi_ids:
6843 kfree(phba->sli4_hba.rpi_ids);
6844 phba->sli4_hba.rpi_ids = NULL;
6845 free_rpi_bmask:
6846 kfree(phba->sli4_hba.rpi_bmask);
6847 phba->sli4_hba.rpi_bmask = NULL;
6848 err_exit:
6849 return rc;
6850 }
6851
6852 /**
6853 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6854 * @phba: Pointer to HBA context object.
6855 *
6856 * This function allocates the number of elements for the specified
6857 * resource type.
6858 **/
6859 int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba * phba)6860 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6861 {
6862 if (phba->sli4_hba.extents_in_use) {
6863 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6864 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6865 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6866 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6867 } else {
6868 kfree(phba->vpi_bmask);
6869 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6870 kfree(phba->vpi_ids);
6871 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6872 kfree(phba->sli4_hba.xri_bmask);
6873 kfree(phba->sli4_hba.xri_ids);
6874 kfree(phba->sli4_hba.vfi_bmask);
6875 kfree(phba->sli4_hba.vfi_ids);
6876 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6877 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6878 }
6879
6880 return 0;
6881 }
6882
6883 /**
6884 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6885 * @phba: Pointer to HBA context object.
6886 * @type: The resource extent type.
6887 * @extnt_cnt: buffer to hold port extent count response
6888 * @extnt_size: buffer to hold port extent size response.
6889 *
6890 * This function calls the port to read the host allocated extents
6891 * for a particular type.
6892 **/
6893 int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba * phba,uint16_t type,uint16_t * extnt_cnt,uint16_t * extnt_size)6894 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6895 uint16_t *extnt_cnt, uint16_t *extnt_size)
6896 {
6897 bool emb;
6898 int rc = 0;
6899 uint16_t curr_blks = 0;
6900 uint32_t req_len, emb_len;
6901 uint32_t alloc_len, mbox_tmo;
6902 struct list_head *blk_list_head;
6903 struct lpfc_rsrc_blks *rsrc_blk;
6904 LPFC_MBOXQ_t *mbox;
6905 void *virtaddr = NULL;
6906 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6907 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6908 union lpfc_sli4_cfg_shdr *shdr;
6909
6910 switch (type) {
6911 case LPFC_RSC_TYPE_FCOE_VPI:
6912 blk_list_head = &phba->lpfc_vpi_blk_list;
6913 break;
6914 case LPFC_RSC_TYPE_FCOE_XRI:
6915 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6916 break;
6917 case LPFC_RSC_TYPE_FCOE_VFI:
6918 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6919 break;
6920 case LPFC_RSC_TYPE_FCOE_RPI:
6921 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6922 break;
6923 default:
6924 return -EIO;
6925 }
6926
6927 /* Count the number of extents currently allocatd for this type. */
6928 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6929 if (curr_blks == 0) {
6930 /*
6931 * The GET_ALLOCATED mailbox does not return the size,
6932 * just the count. The size should be just the size
6933 * stored in the current allocated block and all sizes
6934 * for an extent type are the same so set the return
6935 * value now.
6936 */
6937 *extnt_size = rsrc_blk->rsrc_size;
6938 }
6939 curr_blks++;
6940 }
6941
6942 /*
6943 * Calculate the size of an embedded mailbox. The uint32_t
6944 * accounts for extents-specific word.
6945 */
6946 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6947 sizeof(uint32_t);
6948
6949 /*
6950 * Presume the allocation and response will fit into an embedded
6951 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6952 */
6953 emb = LPFC_SLI4_MBX_EMBED;
6954 req_len = emb_len;
6955 if (req_len > emb_len) {
6956 req_len = curr_blks * sizeof(uint16_t) +
6957 sizeof(union lpfc_sli4_cfg_shdr) +
6958 sizeof(uint32_t);
6959 emb = LPFC_SLI4_MBX_NEMBED;
6960 }
6961
6962 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6963 if (!mbox)
6964 return -ENOMEM;
6965 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6966
6967 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6968 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6969 req_len, emb);
6970 if (alloc_len < req_len) {
6971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6972 "2983 Allocated DMA memory size (x%x) is "
6973 "less than the requested DMA memory "
6974 "size (x%x)\n", alloc_len, req_len);
6975 rc = -ENOMEM;
6976 goto err_exit;
6977 }
6978 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6979 if (unlikely(rc)) {
6980 rc = -EIO;
6981 goto err_exit;
6982 }
6983
6984 if (!phba->sli4_hba.intr_enable)
6985 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6986 else {
6987 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6988 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6989 }
6990
6991 if (unlikely(rc)) {
6992 rc = -EIO;
6993 goto err_exit;
6994 }
6995
6996 /*
6997 * Figure out where the response is located. Then get local pointers
6998 * to the response data. The port does not guarantee to respond to
6999 * all extents counts request so update the local variable with the
7000 * allocated count from the port.
7001 */
7002 if (emb == LPFC_SLI4_MBX_EMBED) {
7003 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7004 shdr = &rsrc_ext->header.cfg_shdr;
7005 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7006 } else {
7007 virtaddr = mbox->sge_array->addr[0];
7008 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7009 shdr = &n_rsrc->cfg_shdr;
7010 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7011 }
7012
7013 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7015 "2984 Failed to read allocated resources "
7016 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7017 type,
7018 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7019 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7020 rc = -EIO;
7021 goto err_exit;
7022 }
7023 err_exit:
7024 lpfc_sli4_mbox_cmd_free(phba, mbox);
7025 return rc;
7026 }
7027
7028 /**
7029 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7030 * @phba: pointer to lpfc hba data structure.
7031 * @sgl_list: linked link of sgl buffers to post
7032 * @cnt: number of linked list buffers
7033 *
7034 * This routine walks the list of buffers that have been allocated and
7035 * repost them to the port by using SGL block post. This is needed after a
7036 * pci_function_reset/warm_start or start. It attempts to construct blocks
7037 * of buffer sgls which contains contiguous xris and uses the non-embedded
7038 * SGL block post mailbox commands to post them to the port. For single
7039 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
7040 * mailbox command for posting.
7041 *
7042 * Returns: 0 = success, non-zero failure.
7043 **/
7044 static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba * phba,struct list_head * sgl_list,int cnt)7045 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7046 struct list_head *sgl_list, int cnt)
7047 {
7048 struct lpfc_sglq *sglq_entry = NULL;
7049 struct lpfc_sglq *sglq_entry_next = NULL;
7050 struct lpfc_sglq *sglq_entry_first = NULL;
7051 int status, total_cnt;
7052 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7053 int last_xritag = NO_XRI;
7054 LIST_HEAD(prep_sgl_list);
7055 LIST_HEAD(blck_sgl_list);
7056 LIST_HEAD(allc_sgl_list);
7057 LIST_HEAD(post_sgl_list);
7058 LIST_HEAD(free_sgl_list);
7059
7060 spin_lock_irq(&phba->hbalock);
7061 spin_lock(&phba->sli4_hba.sgl_list_lock);
7062 list_splice_init(sgl_list, &allc_sgl_list);
7063 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7064 spin_unlock_irq(&phba->hbalock);
7065
7066 total_cnt = cnt;
7067 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7068 &allc_sgl_list, list) {
7069 list_del_init(&sglq_entry->list);
7070 block_cnt++;
7071 if ((last_xritag != NO_XRI) &&
7072 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7073 /* a hole in xri block, form a sgl posting block */
7074 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7075 post_cnt = block_cnt - 1;
7076 /* prepare list for next posting block */
7077 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7078 block_cnt = 1;
7079 } else {
7080 /* prepare list for next posting block */
7081 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7082 /* enough sgls for non-embed sgl mbox command */
7083 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7084 list_splice_init(&prep_sgl_list,
7085 &blck_sgl_list);
7086 post_cnt = block_cnt;
7087 block_cnt = 0;
7088 }
7089 }
7090 num_posted++;
7091
7092 /* keep track of last sgl's xritag */
7093 last_xritag = sglq_entry->sli4_xritag;
7094
7095 /* end of repost sgl list condition for buffers */
7096 if (num_posted == total_cnt) {
7097 if (post_cnt == 0) {
7098 list_splice_init(&prep_sgl_list,
7099 &blck_sgl_list);
7100 post_cnt = block_cnt;
7101 } else if (block_cnt == 1) {
7102 status = lpfc_sli4_post_sgl(phba,
7103 sglq_entry->phys, 0,
7104 sglq_entry->sli4_xritag);
7105 if (!status) {
7106 /* successful, put sgl to posted list */
7107 list_add_tail(&sglq_entry->list,
7108 &post_sgl_list);
7109 } else {
7110 /* Failure, put sgl to free list */
7111 lpfc_printf_log(phba, KERN_WARNING,
7112 LOG_SLI,
7113 "3159 Failed to post "
7114 "sgl, xritag:x%x\n",
7115 sglq_entry->sli4_xritag);
7116 list_add_tail(&sglq_entry->list,
7117 &free_sgl_list);
7118 total_cnt--;
7119 }
7120 }
7121 }
7122
7123 /* continue until a nembed page worth of sgls */
7124 if (post_cnt == 0)
7125 continue;
7126
7127 /* post the buffer list sgls as a block */
7128 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7129 post_cnt);
7130
7131 if (!status) {
7132 /* success, put sgl list to posted sgl list */
7133 list_splice_init(&blck_sgl_list, &post_sgl_list);
7134 } else {
7135 /* Failure, put sgl list to free sgl list */
7136 sglq_entry_first = list_first_entry(&blck_sgl_list,
7137 struct lpfc_sglq,
7138 list);
7139 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7140 "3160 Failed to post sgl-list, "
7141 "xritag:x%x-x%x\n",
7142 sglq_entry_first->sli4_xritag,
7143 (sglq_entry_first->sli4_xritag +
7144 post_cnt - 1));
7145 list_splice_init(&blck_sgl_list, &free_sgl_list);
7146 total_cnt -= post_cnt;
7147 }
7148
7149 /* don't reset xirtag due to hole in xri block */
7150 if (block_cnt == 0)
7151 last_xritag = NO_XRI;
7152
7153 /* reset sgl post count for next round of posting */
7154 post_cnt = 0;
7155 }
7156
7157 /* free the sgls failed to post */
7158 lpfc_free_sgl_list(phba, &free_sgl_list);
7159
7160 /* push sgls posted to the available list */
7161 if (!list_empty(&post_sgl_list)) {
7162 spin_lock_irq(&phba->hbalock);
7163 spin_lock(&phba->sli4_hba.sgl_list_lock);
7164 list_splice_init(&post_sgl_list, sgl_list);
7165 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7166 spin_unlock_irq(&phba->hbalock);
7167 } else {
7168 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7169 "3161 Failure to post sgl to port.\n");
7170 return -EIO;
7171 }
7172
7173 /* return the number of XRIs actually posted */
7174 return total_cnt;
7175 }
7176
7177 /**
7178 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7179 * @phba: pointer to lpfc hba data structure.
7180 *
7181 * This routine walks the list of nvme buffers that have been allocated and
7182 * repost them to the port by using SGL block post. This is needed after a
7183 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7184 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7185 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7186 *
7187 * Returns: 0 = success, non-zero failure.
7188 **/
7189 static int
lpfc_sli4_repost_io_sgl_list(struct lpfc_hba * phba)7190 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7191 {
7192 LIST_HEAD(post_nblist);
7193 int num_posted, rc = 0;
7194
7195 /* get all NVME buffers need to repost to a local list */
7196 lpfc_io_buf_flush(phba, &post_nblist);
7197
7198 /* post the list of nvme buffer sgls to port if available */
7199 if (!list_empty(&post_nblist)) {
7200 num_posted = lpfc_sli4_post_io_sgl_list(
7201 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7202 /* failed to post any nvme buffer, return error */
7203 if (num_posted == 0)
7204 rc = -EIO;
7205 }
7206 return rc;
7207 }
7208
7209 static void
lpfc_set_host_data(struct lpfc_hba * phba,LPFC_MBOXQ_t * mbox)7210 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7211 {
7212 uint32_t len;
7213
7214 len = sizeof(struct lpfc_mbx_set_host_data) -
7215 sizeof(struct lpfc_sli4_cfg_mhdr);
7216 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7217 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7218 LPFC_SLI4_MBX_EMBED);
7219
7220 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7221 mbox->u.mqe.un.set_host_data.param_len =
7222 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7223 snprintf(mbox->u.mqe.un.set_host_data.data,
7224 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7225 "Linux %s v"LPFC_DRIVER_VERSION,
7226 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7227 }
7228
7229 int
lpfc_post_rq_buffer(struct lpfc_hba * phba,struct lpfc_queue * hrq,struct lpfc_queue * drq,int count,int idx)7230 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7231 struct lpfc_queue *drq, int count, int idx)
7232 {
7233 int rc, i;
7234 struct lpfc_rqe hrqe;
7235 struct lpfc_rqe drqe;
7236 struct lpfc_rqb *rqbp;
7237 unsigned long flags;
7238 struct rqb_dmabuf *rqb_buffer;
7239 LIST_HEAD(rqb_buf_list);
7240
7241 rqbp = hrq->rqbp;
7242 for (i = 0; i < count; i++) {
7243 spin_lock_irqsave(&phba->hbalock, flags);
7244 /* IF RQ is already full, don't bother */
7245 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7246 spin_unlock_irqrestore(&phba->hbalock, flags);
7247 break;
7248 }
7249 spin_unlock_irqrestore(&phba->hbalock, flags);
7250
7251 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7252 if (!rqb_buffer)
7253 break;
7254 rqb_buffer->hrq = hrq;
7255 rqb_buffer->drq = drq;
7256 rqb_buffer->idx = idx;
7257 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7258 }
7259
7260 spin_lock_irqsave(&phba->hbalock, flags);
7261 while (!list_empty(&rqb_buf_list)) {
7262 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7263 hbuf.list);
7264
7265 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7266 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7267 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7268 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7269 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7270 if (rc < 0) {
7271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7272 "6421 Cannot post to HRQ %d: %x %x %x "
7273 "DRQ %x %x\n",
7274 hrq->queue_id,
7275 hrq->host_index,
7276 hrq->hba_index,
7277 hrq->entry_count,
7278 drq->host_index,
7279 drq->hba_index);
7280 rqbp->rqb_free_buffer(phba, rqb_buffer);
7281 } else {
7282 list_add_tail(&rqb_buffer->hbuf.list,
7283 &rqbp->rqb_buffer_list);
7284 rqbp->buffer_count++;
7285 }
7286 }
7287 spin_unlock_irqrestore(&phba->hbalock, flags);
7288 return 1;
7289 }
7290
7291 /**
7292 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7293 * @phba: pointer to lpfc hba data structure.
7294 *
7295 * This routine initializes the per-cq idle_stat to dynamically dictate
7296 * polling decisions.
7297 *
7298 * Return codes:
7299 * None
7300 **/
lpfc_init_idle_stat_hb(struct lpfc_hba * phba)7301 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7302 {
7303 int i;
7304 struct lpfc_sli4_hdw_queue *hdwq;
7305 struct lpfc_queue *cq;
7306 struct lpfc_idle_stat *idle_stat;
7307 u64 wall;
7308
7309 for_each_present_cpu(i) {
7310 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7311 cq = hdwq->io_cq;
7312
7313 /* Skip if we've already handled this cq's primary CPU */
7314 if (cq->chann != i)
7315 continue;
7316
7317 idle_stat = &phba->sli4_hba.idle_stat[i];
7318
7319 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7320 idle_stat->prev_wall = wall;
7321
7322 if (phba->nvmet_support)
7323 cq->poll_mode = LPFC_QUEUE_WORK;
7324 else
7325 cq->poll_mode = LPFC_IRQ_POLL;
7326 }
7327
7328 if (!phba->nvmet_support)
7329 schedule_delayed_work(&phba->idle_stat_delay_work,
7330 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7331 }
7332
lpfc_sli4_dip(struct lpfc_hba * phba)7333 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7334 {
7335 uint32_t if_type;
7336
7337 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7338 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7339 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7340 struct lpfc_register reg_data;
7341
7342 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7343 ®_data.word0))
7344 return;
7345
7346 if (bf_get(lpfc_sliport_status_dip, ®_data))
7347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7348 "2904 Firmware Dump Image Present"
7349 " on Adapter");
7350 }
7351 }
7352
7353 /**
7354 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7355 * @phba: Pointer to HBA context object.
7356 *
7357 * This function is the main SLI4 device initialization PCI function. This
7358 * function is called by the HBA initialization code, HBA reset code and
7359 * HBA error attention handler code. Caller is not required to hold any
7360 * locks.
7361 **/
7362 int
lpfc_sli4_hba_setup(struct lpfc_hba * phba)7363 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7364 {
7365 int rc, i, cnt, len, dd;
7366 LPFC_MBOXQ_t *mboxq;
7367 struct lpfc_mqe *mqe;
7368 uint8_t *vpd;
7369 uint32_t vpd_size;
7370 uint32_t ftr_rsp = 0;
7371 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7372 struct lpfc_vport *vport = phba->pport;
7373 struct lpfc_dmabuf *mp;
7374 struct lpfc_rqb *rqbp;
7375
7376 /* Perform a PCI function reset to start from clean */
7377 rc = lpfc_pci_function_reset(phba);
7378 if (unlikely(rc))
7379 return -ENODEV;
7380
7381 /* Check the HBA Host Status Register for readyness */
7382 rc = lpfc_sli4_post_status_check(phba);
7383 if (unlikely(rc))
7384 return -ENODEV;
7385 else {
7386 spin_lock_irq(&phba->hbalock);
7387 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7388 spin_unlock_irq(&phba->hbalock);
7389 }
7390
7391 lpfc_sli4_dip(phba);
7392
7393 /*
7394 * Allocate a single mailbox container for initializing the
7395 * port.
7396 */
7397 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7398 if (!mboxq)
7399 return -ENOMEM;
7400
7401 /* Issue READ_REV to collect vpd and FW information. */
7402 vpd_size = SLI4_PAGE_SIZE;
7403 vpd = kzalloc(vpd_size, GFP_KERNEL);
7404 if (!vpd) {
7405 rc = -ENOMEM;
7406 goto out_free_mbox;
7407 }
7408
7409 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7410 if (unlikely(rc)) {
7411 kfree(vpd);
7412 goto out_free_mbox;
7413 }
7414
7415 mqe = &mboxq->u.mqe;
7416 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7417 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7418 phba->hba_flag |= HBA_FCOE_MODE;
7419 phba->fcp_embed_io = 0; /* SLI4 FC support only */
7420 } else {
7421 phba->hba_flag &= ~HBA_FCOE_MODE;
7422 }
7423
7424 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7425 LPFC_DCBX_CEE_MODE)
7426 phba->hba_flag |= HBA_FIP_SUPPORT;
7427 else
7428 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7429
7430 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7431
7432 if (phba->sli_rev != LPFC_SLI_REV4) {
7433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7434 "0376 READ_REV Error. SLI Level %d "
7435 "FCoE enabled %d\n",
7436 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7437 rc = -EIO;
7438 kfree(vpd);
7439 goto out_free_mbox;
7440 }
7441
7442 /*
7443 * Continue initialization with default values even if driver failed
7444 * to read FCoE param config regions, only read parameters if the
7445 * board is FCoE
7446 */
7447 if (phba->hba_flag & HBA_FCOE_MODE &&
7448 lpfc_sli4_read_fcoe_params(phba))
7449 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7450 "2570 Failed to read FCoE parameters\n");
7451
7452 /*
7453 * Retrieve sli4 device physical port name, failure of doing it
7454 * is considered as non-fatal.
7455 */
7456 rc = lpfc_sli4_retrieve_pport_name(phba);
7457 if (!rc)
7458 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7459 "3080 Successful retrieving SLI4 device "
7460 "physical port name: %s.\n", phba->Port);
7461
7462 rc = lpfc_sli4_get_ctl_attr(phba);
7463 if (!rc)
7464 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7465 "8351 Successful retrieving SLI4 device "
7466 "CTL ATTR\n");
7467
7468 /*
7469 * Evaluate the read rev and vpd data. Populate the driver
7470 * state with the results. If this routine fails, the failure
7471 * is not fatal as the driver will use generic values.
7472 */
7473 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7474 if (unlikely(!rc)) {
7475 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7476 "0377 Error %d parsing vpd. "
7477 "Using defaults.\n", rc);
7478 rc = 0;
7479 }
7480 kfree(vpd);
7481
7482 /* Save information as VPD data */
7483 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7484 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7485
7486 /*
7487 * This is because first G7 ASIC doesn't support the standard
7488 * 0x5a NVME cmd descriptor type/subtype
7489 */
7490 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7491 LPFC_SLI_INTF_IF_TYPE_6) &&
7492 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7493 (phba->vpd.rev.smRev == 0) &&
7494 (phba->cfg_nvme_embed_cmd == 1))
7495 phba->cfg_nvme_embed_cmd = 0;
7496
7497 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7498 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7499 &mqe->un.read_rev);
7500 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7501 &mqe->un.read_rev);
7502 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7503 &mqe->un.read_rev);
7504 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7505 &mqe->un.read_rev);
7506 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7507 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7508 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7509 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7510 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7511 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7512 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7513 "(%d):0380 READ_REV Status x%x "
7514 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7515 mboxq->vport ? mboxq->vport->vpi : 0,
7516 bf_get(lpfc_mqe_status, mqe),
7517 phba->vpd.rev.opFwName,
7518 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7519 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7520
7521 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7522 LPFC_SLI_INTF_IF_TYPE_0) {
7523 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7525 if (rc == MBX_SUCCESS) {
7526 phba->hba_flag |= HBA_RECOVERABLE_UE;
7527 /* Set 1Sec interval to detect UE */
7528 phba->eratt_poll_interval = 1;
7529 phba->sli4_hba.ue_to_sr = bf_get(
7530 lpfc_mbx_set_feature_UESR,
7531 &mboxq->u.mqe.un.set_feature);
7532 phba->sli4_hba.ue_to_rp = bf_get(
7533 lpfc_mbx_set_feature_UERP,
7534 &mboxq->u.mqe.un.set_feature);
7535 }
7536 }
7537
7538 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7539 /* Enable MDS Diagnostics only if the SLI Port supports it */
7540 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7541 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7542 if (rc != MBX_SUCCESS)
7543 phba->mds_diags_support = 0;
7544 }
7545
7546 /*
7547 * Discover the port's supported feature set and match it against the
7548 * hosts requests.
7549 */
7550 lpfc_request_features(phba, mboxq);
7551 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7552 if (unlikely(rc)) {
7553 rc = -EIO;
7554 goto out_free_mbox;
7555 }
7556
7557 /*
7558 * The port must support FCP initiator mode as this is the
7559 * only mode running in the host.
7560 */
7561 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7562 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7563 "0378 No support for fcpi mode.\n");
7564 ftr_rsp++;
7565 }
7566
7567 /* Performance Hints are ONLY for FCoE */
7568 if (phba->hba_flag & HBA_FCOE_MODE) {
7569 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7570 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7571 else
7572 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7573 }
7574
7575 /*
7576 * If the port cannot support the host's requested features
7577 * then turn off the global config parameters to disable the
7578 * feature in the driver. This is not a fatal error.
7579 */
7580 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7581 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7582 phba->cfg_enable_bg = 0;
7583 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7584 ftr_rsp++;
7585 }
7586 }
7587
7588 if (phba->max_vpi && phba->cfg_enable_npiv &&
7589 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7590 ftr_rsp++;
7591
7592 if (ftr_rsp) {
7593 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7594 "0379 Feature Mismatch Data: x%08x %08x "
7595 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7596 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7597 phba->cfg_enable_npiv, phba->max_vpi);
7598 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7599 phba->cfg_enable_bg = 0;
7600 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7601 phba->cfg_enable_npiv = 0;
7602 }
7603
7604 /* These SLI3 features are assumed in SLI4 */
7605 spin_lock_irq(&phba->hbalock);
7606 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7607 spin_unlock_irq(&phba->hbalock);
7608
7609 /* Always try to enable dual dump feature if we can */
7610 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7611 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7612 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7613 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7614 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7615 "6448 Dual Dump is enabled\n");
7616 else
7617 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7618 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7619 "rc:x%x dd:x%x\n",
7620 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7621 lpfc_sli_config_mbox_subsys_get(
7622 phba, mboxq),
7623 lpfc_sli_config_mbox_opcode_get(
7624 phba, mboxq),
7625 rc, dd);
7626 /*
7627 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
7628 * calls depends on these resources to complete port setup.
7629 */
7630 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7631 if (rc) {
7632 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7633 "2920 Failed to alloc Resource IDs "
7634 "rc = x%x\n", rc);
7635 goto out_free_mbox;
7636 }
7637
7638 lpfc_set_host_data(phba, mboxq);
7639
7640 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7641 if (rc) {
7642 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7643 "2134 Failed to set host os driver version %x",
7644 rc);
7645 }
7646
7647 /* Read the port's service parameters. */
7648 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7649 if (rc) {
7650 phba->link_state = LPFC_HBA_ERROR;
7651 rc = -ENOMEM;
7652 goto out_free_mbox;
7653 }
7654
7655 mboxq->vport = vport;
7656 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7657 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7658 if (rc == MBX_SUCCESS) {
7659 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7660 rc = 0;
7661 }
7662
7663 /*
7664 * This memory was allocated by the lpfc_read_sparam routine. Release
7665 * it to the mbuf pool.
7666 */
7667 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7668 kfree(mp);
7669 mboxq->ctx_buf = NULL;
7670 if (unlikely(rc)) {
7671 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7672 "0382 READ_SPARAM command failed "
7673 "status %d, mbxStatus x%x\n",
7674 rc, bf_get(lpfc_mqe_status, mqe));
7675 phba->link_state = LPFC_HBA_ERROR;
7676 rc = -EIO;
7677 goto out_free_mbox;
7678 }
7679
7680 lpfc_update_vport_wwn(vport);
7681
7682 /* Update the fc_host data structures with new wwn. */
7683 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7684 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7685
7686 /* Create all the SLI4 queues */
7687 rc = lpfc_sli4_queue_create(phba);
7688 if (rc) {
7689 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7690 "3089 Failed to allocate queues\n");
7691 rc = -ENODEV;
7692 goto out_free_mbox;
7693 }
7694 /* Set up all the queues to the device */
7695 rc = lpfc_sli4_queue_setup(phba);
7696 if (unlikely(rc)) {
7697 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7698 "0381 Error %d during queue setup.\n ", rc);
7699 goto out_stop_timers;
7700 }
7701 /* Initialize the driver internal SLI layer lists. */
7702 lpfc_sli4_setup(phba);
7703 lpfc_sli4_queue_init(phba);
7704
7705 /* update host els xri-sgl sizes and mappings */
7706 rc = lpfc_sli4_els_sgl_update(phba);
7707 if (unlikely(rc)) {
7708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7709 "1400 Failed to update xri-sgl size and "
7710 "mapping: %d\n", rc);
7711 goto out_destroy_queue;
7712 }
7713
7714 /* register the els sgl pool to the port */
7715 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7716 phba->sli4_hba.els_xri_cnt);
7717 if (unlikely(rc < 0)) {
7718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7719 "0582 Error %d during els sgl post "
7720 "operation\n", rc);
7721 rc = -ENODEV;
7722 goto out_destroy_queue;
7723 }
7724 phba->sli4_hba.els_xri_cnt = rc;
7725
7726 if (phba->nvmet_support) {
7727 /* update host nvmet xri-sgl sizes and mappings */
7728 rc = lpfc_sli4_nvmet_sgl_update(phba);
7729 if (unlikely(rc)) {
7730 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7731 "6308 Failed to update nvmet-sgl size "
7732 "and mapping: %d\n", rc);
7733 goto out_destroy_queue;
7734 }
7735
7736 /* register the nvmet sgl pool to the port */
7737 rc = lpfc_sli4_repost_sgl_list(
7738 phba,
7739 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7740 phba->sli4_hba.nvmet_xri_cnt);
7741 if (unlikely(rc < 0)) {
7742 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7743 "3117 Error %d during nvmet "
7744 "sgl post\n", rc);
7745 rc = -ENODEV;
7746 goto out_destroy_queue;
7747 }
7748 phba->sli4_hba.nvmet_xri_cnt = rc;
7749
7750 /* We allocate an iocbq for every receive context SGL.
7751 * The additional allocation is for abort and ls handling.
7752 */
7753 cnt = phba->sli4_hba.nvmet_xri_cnt +
7754 phba->sli4_hba.max_cfg_param.max_xri;
7755 } else {
7756 /* update host common xri-sgl sizes and mappings */
7757 rc = lpfc_sli4_io_sgl_update(phba);
7758 if (unlikely(rc)) {
7759 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7760 "6082 Failed to update nvme-sgl size "
7761 "and mapping: %d\n", rc);
7762 goto out_destroy_queue;
7763 }
7764
7765 /* register the allocated common sgl pool to the port */
7766 rc = lpfc_sli4_repost_io_sgl_list(phba);
7767 if (unlikely(rc)) {
7768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7769 "6116 Error %d during nvme sgl post "
7770 "operation\n", rc);
7771 /* Some NVME buffers were moved to abort nvme list */
7772 /* A pci function reset will repost them */
7773 rc = -ENODEV;
7774 goto out_destroy_queue;
7775 }
7776 /* Each lpfc_io_buf job structure has an iocbq element.
7777 * This cnt provides for abort, els, ct and ls requests.
7778 */
7779 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7780 }
7781
7782 if (!phba->sli.iocbq_lookup) {
7783 /* Initialize and populate the iocb list per host */
7784 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7785 "2821 initialize iocb list with %d entries\n",
7786 cnt);
7787 rc = lpfc_init_iocb_list(phba, cnt);
7788 if (rc) {
7789 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7790 "1413 Failed to init iocb list.\n");
7791 goto out_destroy_queue;
7792 }
7793 }
7794
7795 if (phba->nvmet_support)
7796 lpfc_nvmet_create_targetport(phba);
7797
7798 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7799 /* Post initial buffers to all RQs created */
7800 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7801 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7802 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7803 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7804 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7805 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7806 rqbp->buffer_count = 0;
7807
7808 lpfc_post_rq_buffer(
7809 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7810 phba->sli4_hba.nvmet_mrq_data[i],
7811 phba->cfg_nvmet_mrq_post, i);
7812 }
7813 }
7814
7815 /* Post the rpi header region to the device. */
7816 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7817 if (unlikely(rc)) {
7818 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7819 "0393 Error %d during rpi post operation\n",
7820 rc);
7821 rc = -ENODEV;
7822 goto out_free_iocblist;
7823 }
7824 lpfc_sli4_node_prep(phba);
7825
7826 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7827 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7828 /*
7829 * The FC Port needs to register FCFI (index 0)
7830 */
7831 lpfc_reg_fcfi(phba, mboxq);
7832 mboxq->vport = phba->pport;
7833 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7834 if (rc != MBX_SUCCESS)
7835 goto out_unset_queue;
7836 rc = 0;
7837 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7838 &mboxq->u.mqe.un.reg_fcfi);
7839 } else {
7840 /* We are a NVME Target mode with MRQ > 1 */
7841
7842 /* First register the FCFI */
7843 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7844 mboxq->vport = phba->pport;
7845 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7846 if (rc != MBX_SUCCESS)
7847 goto out_unset_queue;
7848 rc = 0;
7849 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7850 &mboxq->u.mqe.un.reg_fcfi_mrq);
7851
7852 /* Next register the MRQs */
7853 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7854 mboxq->vport = phba->pport;
7855 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7856 if (rc != MBX_SUCCESS)
7857 goto out_unset_queue;
7858 rc = 0;
7859 }
7860 /* Check if the port is configured to be disabled */
7861 lpfc_sli_read_link_ste(phba);
7862 }
7863
7864 /* Don't post more new bufs if repost already recovered
7865 * the nvme sgls.
7866 */
7867 if (phba->nvmet_support == 0) {
7868 if (phba->sli4_hba.io_xri_cnt == 0) {
7869 len = lpfc_new_io_buf(
7870 phba, phba->sli4_hba.io_xri_max);
7871 if (len == 0) {
7872 rc = -ENOMEM;
7873 goto out_unset_queue;
7874 }
7875
7876 if (phba->cfg_xri_rebalancing)
7877 lpfc_create_multixri_pools(phba);
7878 }
7879 } else {
7880 phba->cfg_xri_rebalancing = 0;
7881 }
7882
7883 /* Allow asynchronous mailbox command to go through */
7884 spin_lock_irq(&phba->hbalock);
7885 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7886 spin_unlock_irq(&phba->hbalock);
7887
7888 /* Post receive buffers to the device */
7889 lpfc_sli4_rb_setup(phba);
7890
7891 /* Reset HBA FCF states after HBA reset */
7892 phba->fcf.fcf_flag = 0;
7893 phba->fcf.current_rec.flag = 0;
7894
7895 /* Start the ELS watchdog timer */
7896 mod_timer(&vport->els_tmofunc,
7897 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7898
7899 /* Start heart beat timer */
7900 mod_timer(&phba->hb_tmofunc,
7901 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7902 phba->hb_outstanding = 0;
7903 phba->last_completion_time = jiffies;
7904
7905 /* start eq_delay heartbeat */
7906 if (phba->cfg_auto_imax)
7907 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7908 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7909
7910 /* start per phba idle_stat_delay heartbeat */
7911 lpfc_init_idle_stat_hb(phba);
7912
7913 /* Start error attention (ERATT) polling timer */
7914 mod_timer(&phba->eratt_poll,
7915 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7916
7917 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
7918 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7919 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7920 if (!rc) {
7921 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7922 "2829 This device supports "
7923 "Advanced Error Reporting (AER)\n");
7924 spin_lock_irq(&phba->hbalock);
7925 phba->hba_flag |= HBA_AER_ENABLED;
7926 spin_unlock_irq(&phba->hbalock);
7927 } else {
7928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7929 "2830 This device does not support "
7930 "Advanced Error Reporting (AER)\n");
7931 phba->cfg_aer_support = 0;
7932 }
7933 rc = 0;
7934 }
7935
7936 /*
7937 * The port is ready, set the host's link state to LINK_DOWN
7938 * in preparation for link interrupts.
7939 */
7940 spin_lock_irq(&phba->hbalock);
7941 phba->link_state = LPFC_LINK_DOWN;
7942
7943 /* Check if physical ports are trunked */
7944 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7945 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7946 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7947 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7948 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7949 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7950 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7951 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7952 spin_unlock_irq(&phba->hbalock);
7953
7954 /* Arm the CQs and then EQs on device */
7955 lpfc_sli4_arm_cqeq_intr(phba);
7956
7957 /* Indicate device interrupt mode */
7958 phba->sli4_hba.intr_enable = 1;
7959
7960 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7961 (phba->hba_flag & LINK_DISABLED)) {
7962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7963 "3103 Adapter Link is disabled.\n");
7964 lpfc_down_link(phba, mboxq);
7965 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7966 if (rc != MBX_SUCCESS) {
7967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7968 "3104 Adapter failed to issue "
7969 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7970 goto out_io_buff_free;
7971 }
7972 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7973 /* don't perform init_link on SLI4 FC port loopback test */
7974 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7975 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7976 if (rc)
7977 goto out_io_buff_free;
7978 }
7979 }
7980 mempool_free(mboxq, phba->mbox_mem_pool);
7981 return rc;
7982 out_io_buff_free:
7983 /* Free allocated IO Buffers */
7984 lpfc_io_free(phba);
7985 out_unset_queue:
7986 /* Unset all the queues set up in this routine when error out */
7987 lpfc_sli4_queue_unset(phba);
7988 out_free_iocblist:
7989 lpfc_free_iocb_list(phba);
7990 out_destroy_queue:
7991 lpfc_sli4_queue_destroy(phba);
7992 out_stop_timers:
7993 lpfc_stop_hba_timers(phba);
7994 out_free_mbox:
7995 mempool_free(mboxq, phba->mbox_mem_pool);
7996 return rc;
7997 }
7998
7999 /**
8000 * lpfc_mbox_timeout - Timeout call back function for mbox timer
8001 * @t: Context to fetch pointer to hba structure from.
8002 *
8003 * This is the callback function for mailbox timer. The mailbox
8004 * timer is armed when a new mailbox command is issued and the timer
8005 * is deleted when the mailbox complete. The function is called by
8006 * the kernel timer code when a mailbox does not complete within
8007 * expected time. This function wakes up the worker thread to
8008 * process the mailbox timeout and returns. All the processing is
8009 * done by the worker thread function lpfc_mbox_timeout_handler.
8010 **/
8011 void
lpfc_mbox_timeout(struct timer_list * t)8012 lpfc_mbox_timeout(struct timer_list *t)
8013 {
8014 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8015 unsigned long iflag;
8016 uint32_t tmo_posted;
8017
8018 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8019 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8020 if (!tmo_posted)
8021 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8022 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8023
8024 if (!tmo_posted)
8025 lpfc_worker_wake_up(phba);
8026 return;
8027 }
8028
8029 /**
8030 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
8031 * are pending
8032 * @phba: Pointer to HBA context object.
8033 *
8034 * This function checks if any mailbox completions are present on the mailbox
8035 * completion queue.
8036 **/
8037 static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba * phba)8038 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8039 {
8040
8041 uint32_t idx;
8042 struct lpfc_queue *mcq;
8043 struct lpfc_mcqe *mcqe;
8044 bool pending_completions = false;
8045 uint8_t qe_valid;
8046
8047 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8048 return false;
8049
8050 /* Check for completions on mailbox completion queue */
8051
8052 mcq = phba->sli4_hba.mbx_cq;
8053 idx = mcq->hba_index;
8054 qe_valid = mcq->qe_valid;
8055 while (bf_get_le32(lpfc_cqe_valid,
8056 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8057 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8058 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8059 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8060 pending_completions = true;
8061 break;
8062 }
8063 idx = (idx + 1) % mcq->entry_count;
8064 if (mcq->hba_index == idx)
8065 break;
8066
8067 /* if the index wrapped around, toggle the valid bit */
8068 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8069 qe_valid = (qe_valid) ? 0 : 1;
8070 }
8071 return pending_completions;
8072
8073 }
8074
8075 /**
8076 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
8077 * that were missed.
8078 * @phba: Pointer to HBA context object.
8079 *
8080 * For sli4, it is possible to miss an interrupt. As such mbox completions
8081 * maybe missed causing erroneous mailbox timeouts to occur. This function
8082 * checks to see if mbox completions are on the mailbox completion queue
8083 * and will process all the completions associated with the eq for the
8084 * mailbox completion queue.
8085 **/
8086 static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba * phba)8087 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8088 {
8089 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8090 uint32_t eqidx;
8091 struct lpfc_queue *fpeq = NULL;
8092 struct lpfc_queue *eq;
8093 bool mbox_pending;
8094
8095 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8096 return false;
8097
8098 /* Find the EQ associated with the mbox CQ */
8099 if (sli4_hba->hdwq) {
8100 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8101 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8102 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8103 fpeq = eq;
8104 break;
8105 }
8106 }
8107 }
8108 if (!fpeq)
8109 return false;
8110
8111 /* Turn off interrupts from this EQ */
8112
8113 sli4_hba->sli4_eq_clr_intr(fpeq);
8114
8115 /* Check to see if a mbox completion is pending */
8116
8117 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8118
8119 /*
8120 * If a mbox completion is pending, process all the events on EQ
8121 * associated with the mbox completion queue (this could include
8122 * mailbox commands, async events, els commands, receive queue data
8123 * and fcp commands)
8124 */
8125
8126 if (mbox_pending)
8127 /* process and rearm the EQ */
8128 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8129 else
8130 /* Always clear and re-arm the EQ */
8131 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8132
8133 return mbox_pending;
8134
8135 }
8136
8137 /**
8138 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
8139 * @phba: Pointer to HBA context object.
8140 *
8141 * This function is called from worker thread when a mailbox command times out.
8142 * The caller is not required to hold any locks. This function will reset the
8143 * HBA and recover all the pending commands.
8144 **/
8145 void
lpfc_mbox_timeout_handler(struct lpfc_hba * phba)8146 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8147 {
8148 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8149 MAILBOX_t *mb = NULL;
8150
8151 struct lpfc_sli *psli = &phba->sli;
8152
8153 /* If the mailbox completed, process the completion and return */
8154 if (lpfc_sli4_process_missed_mbox_completions(phba))
8155 return;
8156
8157 if (pmbox != NULL)
8158 mb = &pmbox->u.mb;
8159 /* Check the pmbox pointer first. There is a race condition
8160 * between the mbox timeout handler getting executed in the
8161 * worklist and the mailbox actually completing. When this
8162 * race condition occurs, the mbox_active will be NULL.
8163 */
8164 spin_lock_irq(&phba->hbalock);
8165 if (pmbox == NULL) {
8166 lpfc_printf_log(phba, KERN_WARNING,
8167 LOG_MBOX | LOG_SLI,
8168 "0353 Active Mailbox cleared - mailbox timeout "
8169 "exiting\n");
8170 spin_unlock_irq(&phba->hbalock);
8171 return;
8172 }
8173
8174 /* Mbox cmd <mbxCommand> timeout */
8175 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8176 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8177 mb->mbxCommand,
8178 phba->pport->port_state,
8179 phba->sli.sli_flag,
8180 phba->sli.mbox_active);
8181 spin_unlock_irq(&phba->hbalock);
8182
8183 /* Setting state unknown so lpfc_sli_abort_iocb_ring
8184 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
8185 * it to fail all outstanding SCSI IO.
8186 */
8187 spin_lock_irq(&phba->pport->work_port_lock);
8188 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8189 spin_unlock_irq(&phba->pport->work_port_lock);
8190 spin_lock_irq(&phba->hbalock);
8191 phba->link_state = LPFC_LINK_UNKNOWN;
8192 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8193 spin_unlock_irq(&phba->hbalock);
8194
8195 lpfc_sli_abort_fcp_rings(phba);
8196
8197 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8198 "0345 Resetting board due to mailbox timeout\n");
8199
8200 /* Reset the HBA device */
8201 lpfc_reset_hba(phba);
8202 }
8203
8204 /**
8205 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
8206 * @phba: Pointer to HBA context object.
8207 * @pmbox: Pointer to mailbox object.
8208 * @flag: Flag indicating how the mailbox need to be processed.
8209 *
8210 * This function is called by discovery code and HBA management code
8211 * to submit a mailbox command to firmware with SLI-3 interface spec. This
8212 * function gets the hbalock to protect the data structures.
8213 * The mailbox command can be submitted in polling mode, in which case
8214 * this function will wait in a polling loop for the completion of the
8215 * mailbox.
8216 * If the mailbox is submitted in no_wait mode (not polling) the
8217 * function will submit the command and returns immediately without waiting
8218 * for the mailbox completion. The no_wait is supported only when HBA
8219 * is in SLI2/SLI3 mode - interrupts are enabled.
8220 * The SLI interface allows only one mailbox pending at a time. If the
8221 * mailbox is issued in polling mode and there is already a mailbox
8222 * pending, then the function will return an error. If the mailbox is issued
8223 * in NO_WAIT mode and there is a mailbox pending already, the function
8224 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
8225 * The sli layer owns the mailbox object until the completion of mailbox
8226 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
8227 * return codes the caller owns the mailbox command after the return of
8228 * the function.
8229 **/
8230 static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmbox,uint32_t flag)8231 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8232 uint32_t flag)
8233 {
8234 MAILBOX_t *mbx;
8235 struct lpfc_sli *psli = &phba->sli;
8236 uint32_t status, evtctr;
8237 uint32_t ha_copy, hc_copy;
8238 int i;
8239 unsigned long timeout;
8240 unsigned long drvr_flag = 0;
8241 uint32_t word0, ldata;
8242 void __iomem *to_slim;
8243 int processing_queue = 0;
8244
8245 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8246 if (!pmbox) {
8247 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8248 /* processing mbox queue from intr_handler */
8249 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8250 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8251 return MBX_SUCCESS;
8252 }
8253 processing_queue = 1;
8254 pmbox = lpfc_mbox_get(phba);
8255 if (!pmbox) {
8256 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8257 return MBX_SUCCESS;
8258 }
8259 }
8260
8261 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8262 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8263 if(!pmbox->vport) {
8264 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8265 lpfc_printf_log(phba, KERN_ERR,
8266 LOG_MBOX | LOG_VPORT,
8267 "1806 Mbox x%x failed. No vport\n",
8268 pmbox->u.mb.mbxCommand);
8269 dump_stack();
8270 goto out_not_finished;
8271 }
8272 }
8273
8274 /* If the PCI channel is in offline state, do not post mbox. */
8275 if (unlikely(pci_channel_offline(phba->pcidev))) {
8276 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8277 goto out_not_finished;
8278 }
8279
8280 /* If HBA has a deferred error attention, fail the iocb. */
8281 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8282 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8283 goto out_not_finished;
8284 }
8285
8286 psli = &phba->sli;
8287
8288 mbx = &pmbox->u.mb;
8289 status = MBX_SUCCESS;
8290
8291 if (phba->link_state == LPFC_HBA_ERROR) {
8292 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8293
8294 /* Mbox command <mbxCommand> cannot issue */
8295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8296 "(%d):0311 Mailbox command x%x cannot "
8297 "issue Data: x%x x%x\n",
8298 pmbox->vport ? pmbox->vport->vpi : 0,
8299 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8300 goto out_not_finished;
8301 }
8302
8303 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8304 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8305 !(hc_copy & HC_MBINT_ENA)) {
8306 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8308 "(%d):2528 Mailbox command x%x cannot "
8309 "issue Data: x%x x%x\n",
8310 pmbox->vport ? pmbox->vport->vpi : 0,
8311 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8312 goto out_not_finished;
8313 }
8314 }
8315
8316 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8317 /* Polling for a mbox command when another one is already active
8318 * is not allowed in SLI. Also, the driver must have established
8319 * SLI2 mode to queue and process multiple mbox commands.
8320 */
8321
8322 if (flag & MBX_POLL) {
8323 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8324
8325 /* Mbox command <mbxCommand> cannot issue */
8326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8327 "(%d):2529 Mailbox command x%x "
8328 "cannot issue Data: x%x x%x\n",
8329 pmbox->vport ? pmbox->vport->vpi : 0,
8330 pmbox->u.mb.mbxCommand,
8331 psli->sli_flag, flag);
8332 goto out_not_finished;
8333 }
8334
8335 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8336 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8337 /* Mbox command <mbxCommand> cannot issue */
8338 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8339 "(%d):2530 Mailbox command x%x "
8340 "cannot issue Data: x%x x%x\n",
8341 pmbox->vport ? pmbox->vport->vpi : 0,
8342 pmbox->u.mb.mbxCommand,
8343 psli->sli_flag, flag);
8344 goto out_not_finished;
8345 }
8346
8347 /* Another mailbox command is still being processed, queue this
8348 * command to be processed later.
8349 */
8350 lpfc_mbox_put(phba, pmbox);
8351
8352 /* Mbox cmd issue - BUSY */
8353 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8354 "(%d):0308 Mbox cmd issue - BUSY Data: "
8355 "x%x x%x x%x x%x\n",
8356 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8357 mbx->mbxCommand,
8358 phba->pport ? phba->pport->port_state : 0xff,
8359 psli->sli_flag, flag);
8360
8361 psli->slistat.mbox_busy++;
8362 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8363
8364 if (pmbox->vport) {
8365 lpfc_debugfs_disc_trc(pmbox->vport,
8366 LPFC_DISC_TRC_MBOX_VPORT,
8367 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8368 (uint32_t)mbx->mbxCommand,
8369 mbx->un.varWords[0], mbx->un.varWords[1]);
8370 }
8371 else {
8372 lpfc_debugfs_disc_trc(phba->pport,
8373 LPFC_DISC_TRC_MBOX,
8374 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8375 (uint32_t)mbx->mbxCommand,
8376 mbx->un.varWords[0], mbx->un.varWords[1]);
8377 }
8378
8379 return MBX_BUSY;
8380 }
8381
8382 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8383
8384 /* If we are not polling, we MUST be in SLI2 mode */
8385 if (flag != MBX_POLL) {
8386 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8387 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8388 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8389 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8390 /* Mbox command <mbxCommand> cannot issue */
8391 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8392 "(%d):2531 Mailbox command x%x "
8393 "cannot issue Data: x%x x%x\n",
8394 pmbox->vport ? pmbox->vport->vpi : 0,
8395 pmbox->u.mb.mbxCommand,
8396 psli->sli_flag, flag);
8397 goto out_not_finished;
8398 }
8399 /* timeout active mbox command */
8400 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8401 1000);
8402 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8403 }
8404
8405 /* Mailbox cmd <cmd> issue */
8406 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8407 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8408 "x%x\n",
8409 pmbox->vport ? pmbox->vport->vpi : 0,
8410 mbx->mbxCommand,
8411 phba->pport ? phba->pport->port_state : 0xff,
8412 psli->sli_flag, flag);
8413
8414 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8415 if (pmbox->vport) {
8416 lpfc_debugfs_disc_trc(pmbox->vport,
8417 LPFC_DISC_TRC_MBOX_VPORT,
8418 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8419 (uint32_t)mbx->mbxCommand,
8420 mbx->un.varWords[0], mbx->un.varWords[1]);
8421 }
8422 else {
8423 lpfc_debugfs_disc_trc(phba->pport,
8424 LPFC_DISC_TRC_MBOX,
8425 "MBOX Send: cmd:x%x mb:x%x x%x",
8426 (uint32_t)mbx->mbxCommand,
8427 mbx->un.varWords[0], mbx->un.varWords[1]);
8428 }
8429 }
8430
8431 psli->slistat.mbox_cmd++;
8432 evtctr = psli->slistat.mbox_event;
8433
8434 /* next set own bit for the adapter and copy over command word */
8435 mbx->mbxOwner = OWN_CHIP;
8436
8437 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8438 /* Populate mbox extension offset word. */
8439 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8440 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8441 = (uint8_t *)phba->mbox_ext
8442 - (uint8_t *)phba->mbox;
8443 }
8444
8445 /* Copy the mailbox extension data */
8446 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8447 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8448 (uint8_t *)phba->mbox_ext,
8449 pmbox->in_ext_byte_len);
8450 }
8451 /* Copy command data to host SLIM area */
8452 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8453 } else {
8454 /* Populate mbox extension offset word. */
8455 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8456 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8457 = MAILBOX_HBA_EXT_OFFSET;
8458
8459 /* Copy the mailbox extension data */
8460 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8461 lpfc_memcpy_to_slim(phba->MBslimaddr +
8462 MAILBOX_HBA_EXT_OFFSET,
8463 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8464
8465 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8466 /* copy command data into host mbox for cmpl */
8467 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8468 MAILBOX_CMD_SIZE);
8469
8470 /* First copy mbox command data to HBA SLIM, skip past first
8471 word */
8472 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8473 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8474 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8475
8476 /* Next copy over first word, with mbxOwner set */
8477 ldata = *((uint32_t *)mbx);
8478 to_slim = phba->MBslimaddr;
8479 writel(ldata, to_slim);
8480 readl(to_slim); /* flush */
8481
8482 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8483 /* switch over to host mailbox */
8484 psli->sli_flag |= LPFC_SLI_ACTIVE;
8485 }
8486
8487 wmb();
8488
8489 switch (flag) {
8490 case MBX_NOWAIT:
8491 /* Set up reference to mailbox command */
8492 psli->mbox_active = pmbox;
8493 /* Interrupt board to do it */
8494 writel(CA_MBATT, phba->CAregaddr);
8495 readl(phba->CAregaddr); /* flush */
8496 /* Don't wait for it to finish, just return */
8497 break;
8498
8499 case MBX_POLL:
8500 /* Set up null reference to mailbox command */
8501 psli->mbox_active = NULL;
8502 /* Interrupt board to do it */
8503 writel(CA_MBATT, phba->CAregaddr);
8504 readl(phba->CAregaddr); /* flush */
8505
8506 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8507 /* First read mbox status word */
8508 word0 = *((uint32_t *)phba->mbox);
8509 word0 = le32_to_cpu(word0);
8510 } else {
8511 /* First read mbox status word */
8512 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8513 spin_unlock_irqrestore(&phba->hbalock,
8514 drvr_flag);
8515 goto out_not_finished;
8516 }
8517 }
8518
8519 /* Read the HBA Host Attention Register */
8520 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8521 spin_unlock_irqrestore(&phba->hbalock,
8522 drvr_flag);
8523 goto out_not_finished;
8524 }
8525 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8526 1000) + jiffies;
8527 i = 0;
8528 /* Wait for command to complete */
8529 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8530 (!(ha_copy & HA_MBATT) &&
8531 (phba->link_state > LPFC_WARM_START))) {
8532 if (time_after(jiffies, timeout)) {
8533 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8534 spin_unlock_irqrestore(&phba->hbalock,
8535 drvr_flag);
8536 goto out_not_finished;
8537 }
8538
8539 /* Check if we took a mbox interrupt while we were
8540 polling */
8541 if (((word0 & OWN_CHIP) != OWN_CHIP)
8542 && (evtctr != psli->slistat.mbox_event))
8543 break;
8544
8545 if (i++ > 10) {
8546 spin_unlock_irqrestore(&phba->hbalock,
8547 drvr_flag);
8548 msleep(1);
8549 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8550 }
8551
8552 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8553 /* First copy command data */
8554 word0 = *((uint32_t *)phba->mbox);
8555 word0 = le32_to_cpu(word0);
8556 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8557 MAILBOX_t *slimmb;
8558 uint32_t slimword0;
8559 /* Check real SLIM for any errors */
8560 slimword0 = readl(phba->MBslimaddr);
8561 slimmb = (MAILBOX_t *) & slimword0;
8562 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8563 && slimmb->mbxStatus) {
8564 psli->sli_flag &=
8565 ~LPFC_SLI_ACTIVE;
8566 word0 = slimword0;
8567 }
8568 }
8569 } else {
8570 /* First copy command data */
8571 word0 = readl(phba->MBslimaddr);
8572 }
8573 /* Read the HBA Host Attention Register */
8574 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8575 spin_unlock_irqrestore(&phba->hbalock,
8576 drvr_flag);
8577 goto out_not_finished;
8578 }
8579 }
8580
8581 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8582 /* copy results back to user */
8583 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8584 MAILBOX_CMD_SIZE);
8585 /* Copy the mailbox extension data */
8586 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8587 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8588 pmbox->ctx_buf,
8589 pmbox->out_ext_byte_len);
8590 }
8591 } else {
8592 /* First copy command data */
8593 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8594 MAILBOX_CMD_SIZE);
8595 /* Copy the mailbox extension data */
8596 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8597 lpfc_memcpy_from_slim(
8598 pmbox->ctx_buf,
8599 phba->MBslimaddr +
8600 MAILBOX_HBA_EXT_OFFSET,
8601 pmbox->out_ext_byte_len);
8602 }
8603 }
8604
8605 writel(HA_MBATT, phba->HAregaddr);
8606 readl(phba->HAregaddr); /* flush */
8607
8608 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8609 status = mbx->mbxStatus;
8610 }
8611
8612 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8613 return status;
8614
8615 out_not_finished:
8616 if (processing_queue) {
8617 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8618 lpfc_mbox_cmpl_put(phba, pmbox);
8619 }
8620 return MBX_NOT_FINISHED;
8621 }
8622
8623 /**
8624 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
8625 * @phba: Pointer to HBA context object.
8626 *
8627 * The function blocks the posting of SLI4 asynchronous mailbox commands from
8628 * the driver internal pending mailbox queue. It will then try to wait out the
8629 * possible outstanding mailbox command before return.
8630 *
8631 * Returns:
8632 * 0 - the outstanding mailbox command completed; otherwise, the wait for
8633 * the outstanding mailbox command timed out.
8634 **/
8635 static int
lpfc_sli4_async_mbox_block(struct lpfc_hba * phba)8636 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8637 {
8638 struct lpfc_sli *psli = &phba->sli;
8639 int rc = 0;
8640 unsigned long timeout = 0;
8641
8642 /* Mark the asynchronous mailbox command posting as blocked */
8643 spin_lock_irq(&phba->hbalock);
8644 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8645 /* Determine how long we might wait for the active mailbox
8646 * command to be gracefully completed by firmware.
8647 */
8648 if (phba->sli.mbox_active)
8649 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8650 phba->sli.mbox_active) *
8651 1000) + jiffies;
8652 spin_unlock_irq(&phba->hbalock);
8653
8654 /* Make sure the mailbox is really active */
8655 if (timeout)
8656 lpfc_sli4_process_missed_mbox_completions(phba);
8657
8658 /* Wait for the outstnading mailbox command to complete */
8659 while (phba->sli.mbox_active) {
8660 /* Check active mailbox complete status every 2ms */
8661 msleep(2);
8662 if (time_after(jiffies, timeout)) {
8663 /* Timeout, marked the outstanding cmd not complete */
8664 rc = 1;
8665 break;
8666 }
8667 }
8668
8669 /* Can not cleanly block async mailbox command, fails it */
8670 if (rc) {
8671 spin_lock_irq(&phba->hbalock);
8672 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8673 spin_unlock_irq(&phba->hbalock);
8674 }
8675 return rc;
8676 }
8677
8678 /**
8679 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
8680 * @phba: Pointer to HBA context object.
8681 *
8682 * The function unblocks and resume posting of SLI4 asynchronous mailbox
8683 * commands from the driver internal pending mailbox queue. It makes sure
8684 * that there is no outstanding mailbox command before resuming posting
8685 * asynchronous mailbox commands. If, for any reason, there is outstanding
8686 * mailbox command, it will try to wait it out before resuming asynchronous
8687 * mailbox command posting.
8688 **/
8689 static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba * phba)8690 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8691 {
8692 struct lpfc_sli *psli = &phba->sli;
8693
8694 spin_lock_irq(&phba->hbalock);
8695 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8696 /* Asynchronous mailbox posting is not blocked, do nothing */
8697 spin_unlock_irq(&phba->hbalock);
8698 return;
8699 }
8700
8701 /* Outstanding synchronous mailbox command is guaranteed to be done,
8702 * successful or timeout, after timing-out the outstanding mailbox
8703 * command shall always be removed, so just unblock posting async
8704 * mailbox command and resume
8705 */
8706 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8707 spin_unlock_irq(&phba->hbalock);
8708
8709 /* wake up worker thread to post asynchronous mailbox command */
8710 lpfc_worker_wake_up(phba);
8711 }
8712
8713 /**
8714 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8715 * @phba: Pointer to HBA context object.
8716 * @mboxq: Pointer to mailbox object.
8717 *
8718 * The function waits for the bootstrap mailbox register ready bit from
8719 * port for twice the regular mailbox command timeout value.
8720 *
8721 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8722 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8723 **/
8724 static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba * phba,LPFC_MBOXQ_t * mboxq)8725 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8726 {
8727 uint32_t db_ready;
8728 unsigned long timeout;
8729 struct lpfc_register bmbx_reg;
8730
8731 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8732 * 1000) + jiffies;
8733
8734 do {
8735 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8736 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8737 if (!db_ready)
8738 mdelay(2);
8739
8740 if (time_after(jiffies, timeout))
8741 return MBXERR_ERROR;
8742 } while (!db_ready);
8743
8744 return 0;
8745 }
8746
8747 /**
8748 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8749 * @phba: Pointer to HBA context object.
8750 * @mboxq: Pointer to mailbox object.
8751 *
8752 * The function posts a mailbox to the port. The mailbox is expected
8753 * to be comletely filled in and ready for the port to operate on it.
8754 * This routine executes a synchronous completion operation on the
8755 * mailbox by polling for its completion.
8756 *
8757 * The caller must not be holding any locks when calling this routine.
8758 *
8759 * Returns:
8760 * MBX_SUCCESS - mailbox posted successfully
8761 * Any of the MBX error values.
8762 **/
8763 static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba * phba,LPFC_MBOXQ_t * mboxq)8764 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8765 {
8766 int rc = MBX_SUCCESS;
8767 unsigned long iflag;
8768 uint32_t mcqe_status;
8769 uint32_t mbx_cmnd;
8770 struct lpfc_sli *psli = &phba->sli;
8771 struct lpfc_mqe *mb = &mboxq->u.mqe;
8772 struct lpfc_bmbx_create *mbox_rgn;
8773 struct dma_address *dma_address;
8774
8775 /*
8776 * Only one mailbox can be active to the bootstrap mailbox region
8777 * at a time and there is no queueing provided.
8778 */
8779 spin_lock_irqsave(&phba->hbalock, iflag);
8780 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8781 spin_unlock_irqrestore(&phba->hbalock, iflag);
8782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8783 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8784 "cannot issue Data: x%x x%x\n",
8785 mboxq->vport ? mboxq->vport->vpi : 0,
8786 mboxq->u.mb.mbxCommand,
8787 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8788 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8789 psli->sli_flag, MBX_POLL);
8790 return MBXERR_ERROR;
8791 }
8792 /* The server grabs the token and owns it until release */
8793 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8794 phba->sli.mbox_active = mboxq;
8795 spin_unlock_irqrestore(&phba->hbalock, iflag);
8796
8797 /* wait for bootstrap mbox register for readyness */
8798 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8799 if (rc)
8800 goto exit;
8801 /*
8802 * Initialize the bootstrap memory region to avoid stale data areas
8803 * in the mailbox post. Then copy the caller's mailbox contents to
8804 * the bmbx mailbox region.
8805 */
8806 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8807 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8808 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8809 sizeof(struct lpfc_mqe));
8810
8811 /* Post the high mailbox dma address to the port and wait for ready. */
8812 dma_address = &phba->sli4_hba.bmbx.dma_address;
8813 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8814
8815 /* wait for bootstrap mbox register for hi-address write done */
8816 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8817 if (rc)
8818 goto exit;
8819
8820 /* Post the low mailbox dma address to the port. */
8821 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8822
8823 /* wait for bootstrap mbox register for low address write done */
8824 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8825 if (rc)
8826 goto exit;
8827
8828 /*
8829 * Read the CQ to ensure the mailbox has completed.
8830 * If so, update the mailbox status so that the upper layers
8831 * can complete the request normally.
8832 */
8833 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8834 sizeof(struct lpfc_mqe));
8835 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8836 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8837 sizeof(struct lpfc_mcqe));
8838 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8839 /*
8840 * When the CQE status indicates a failure and the mailbox status
8841 * indicates success then copy the CQE status into the mailbox status
8842 * (and prefix it with x4000).
8843 */
8844 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8845 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8846 bf_set(lpfc_mqe_status, mb,
8847 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8848 rc = MBXERR_ERROR;
8849 } else
8850 lpfc_sli4_swap_str(phba, mboxq);
8851
8852 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8853 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8854 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8855 " x%x x%x CQ: x%x x%x x%x x%x\n",
8856 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8857 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8858 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8859 bf_get(lpfc_mqe_status, mb),
8860 mb->un.mb_words[0], mb->un.mb_words[1],
8861 mb->un.mb_words[2], mb->un.mb_words[3],
8862 mb->un.mb_words[4], mb->un.mb_words[5],
8863 mb->un.mb_words[6], mb->un.mb_words[7],
8864 mb->un.mb_words[8], mb->un.mb_words[9],
8865 mb->un.mb_words[10], mb->un.mb_words[11],
8866 mb->un.mb_words[12], mboxq->mcqe.word0,
8867 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8868 mboxq->mcqe.trailer);
8869 exit:
8870 /* We are holding the token, no needed for lock when release */
8871 spin_lock_irqsave(&phba->hbalock, iflag);
8872 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8873 phba->sli.mbox_active = NULL;
8874 spin_unlock_irqrestore(&phba->hbalock, iflag);
8875 return rc;
8876 }
8877
8878 /**
8879 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8880 * @phba: Pointer to HBA context object.
8881 * @mboxq: Pointer to mailbox object.
8882 * @flag: Flag indicating how the mailbox need to be processed.
8883 *
8884 * This function is called by discovery code and HBA management code to submit
8885 * a mailbox command to firmware with SLI-4 interface spec.
8886 *
8887 * Return codes the caller owns the mailbox command after the return of the
8888 * function.
8889 **/
8890 static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba * phba,LPFC_MBOXQ_t * mboxq,uint32_t flag)8891 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8892 uint32_t flag)
8893 {
8894 struct lpfc_sli *psli = &phba->sli;
8895 unsigned long iflags;
8896 int rc;
8897
8898 /* dump from issue mailbox command if setup */
8899 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8900
8901 rc = lpfc_mbox_dev_check(phba);
8902 if (unlikely(rc)) {
8903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8904 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8905 "cannot issue Data: x%x x%x\n",
8906 mboxq->vport ? mboxq->vport->vpi : 0,
8907 mboxq->u.mb.mbxCommand,
8908 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8909 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8910 psli->sli_flag, flag);
8911 goto out_not_finished;
8912 }
8913
8914 /* Detect polling mode and jump to a handler */
8915 if (!phba->sli4_hba.intr_enable) {
8916 if (flag == MBX_POLL)
8917 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8918 else
8919 rc = -EIO;
8920 if (rc != MBX_SUCCESS)
8921 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8922 "(%d):2541 Mailbox command x%x "
8923 "(x%x/x%x) failure: "
8924 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8925 "Data: x%x x%x\n,",
8926 mboxq->vport ? mboxq->vport->vpi : 0,
8927 mboxq->u.mb.mbxCommand,
8928 lpfc_sli_config_mbox_subsys_get(phba,
8929 mboxq),
8930 lpfc_sli_config_mbox_opcode_get(phba,
8931 mboxq),
8932 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8933 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8934 bf_get(lpfc_mcqe_ext_status,
8935 &mboxq->mcqe),
8936 psli->sli_flag, flag);
8937 return rc;
8938 } else if (flag == MBX_POLL) {
8939 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8940 "(%d):2542 Try to issue mailbox command "
8941 "x%x (x%x/x%x) synchronously ahead of async "
8942 "mailbox command queue: x%x x%x\n",
8943 mboxq->vport ? mboxq->vport->vpi : 0,
8944 mboxq->u.mb.mbxCommand,
8945 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8946 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8947 psli->sli_flag, flag);
8948 /* Try to block the asynchronous mailbox posting */
8949 rc = lpfc_sli4_async_mbox_block(phba);
8950 if (!rc) {
8951 /* Successfully blocked, now issue sync mbox cmd */
8952 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8953 if (rc != MBX_SUCCESS)
8954 lpfc_printf_log(phba, KERN_WARNING,
8955 LOG_MBOX | LOG_SLI,
8956 "(%d):2597 Sync Mailbox command "
8957 "x%x (x%x/x%x) failure: "
8958 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8959 "Data: x%x x%x\n,",
8960 mboxq->vport ? mboxq->vport->vpi : 0,
8961 mboxq->u.mb.mbxCommand,
8962 lpfc_sli_config_mbox_subsys_get(phba,
8963 mboxq),
8964 lpfc_sli_config_mbox_opcode_get(phba,
8965 mboxq),
8966 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8967 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8968 bf_get(lpfc_mcqe_ext_status,
8969 &mboxq->mcqe),
8970 psli->sli_flag, flag);
8971 /* Unblock the async mailbox posting afterward */
8972 lpfc_sli4_async_mbox_unblock(phba);
8973 }
8974 return rc;
8975 }
8976
8977 /* Now, interrupt mode asynchronous mailbox command */
8978 rc = lpfc_mbox_cmd_check(phba, mboxq);
8979 if (rc) {
8980 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8981 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8982 "cannot issue Data: x%x x%x\n",
8983 mboxq->vport ? mboxq->vport->vpi : 0,
8984 mboxq->u.mb.mbxCommand,
8985 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8986 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8987 psli->sli_flag, flag);
8988 goto out_not_finished;
8989 }
8990
8991 /* Put the mailbox command to the driver internal FIFO */
8992 psli->slistat.mbox_busy++;
8993 spin_lock_irqsave(&phba->hbalock, iflags);
8994 lpfc_mbox_put(phba, mboxq);
8995 spin_unlock_irqrestore(&phba->hbalock, iflags);
8996 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8997 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8998 "x%x (x%x/x%x) x%x x%x x%x\n",
8999 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9000 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9001 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9002 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9003 phba->pport->port_state,
9004 psli->sli_flag, MBX_NOWAIT);
9005 /* Wake up worker thread to transport mailbox command from head */
9006 lpfc_worker_wake_up(phba);
9007
9008 return MBX_BUSY;
9009
9010 out_not_finished:
9011 return MBX_NOT_FINISHED;
9012 }
9013
9014 /**
9015 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
9016 * @phba: Pointer to HBA context object.
9017 *
9018 * This function is called by worker thread to send a mailbox command to
9019 * SLI4 HBA firmware.
9020 *
9021 **/
9022 int
lpfc_sli4_post_async_mbox(struct lpfc_hba * phba)9023 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9024 {
9025 struct lpfc_sli *psli = &phba->sli;
9026 LPFC_MBOXQ_t *mboxq;
9027 int rc = MBX_SUCCESS;
9028 unsigned long iflags;
9029 struct lpfc_mqe *mqe;
9030 uint32_t mbx_cmnd;
9031
9032 /* Check interrupt mode before post async mailbox command */
9033 if (unlikely(!phba->sli4_hba.intr_enable))
9034 return MBX_NOT_FINISHED;
9035
9036 /* Check for mailbox command service token */
9037 spin_lock_irqsave(&phba->hbalock, iflags);
9038 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9039 spin_unlock_irqrestore(&phba->hbalock, iflags);
9040 return MBX_NOT_FINISHED;
9041 }
9042 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9043 spin_unlock_irqrestore(&phba->hbalock, iflags);
9044 return MBX_NOT_FINISHED;
9045 }
9046 if (unlikely(phba->sli.mbox_active)) {
9047 spin_unlock_irqrestore(&phba->hbalock, iflags);
9048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9049 "0384 There is pending active mailbox cmd\n");
9050 return MBX_NOT_FINISHED;
9051 }
9052 /* Take the mailbox command service token */
9053 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9054
9055 /* Get the next mailbox command from head of queue */
9056 mboxq = lpfc_mbox_get(phba);
9057
9058 /* If no more mailbox command waiting for post, we're done */
9059 if (!mboxq) {
9060 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9061 spin_unlock_irqrestore(&phba->hbalock, iflags);
9062 return MBX_SUCCESS;
9063 }
9064 phba->sli.mbox_active = mboxq;
9065 spin_unlock_irqrestore(&phba->hbalock, iflags);
9066
9067 /* Check device readiness for posting mailbox command */
9068 rc = lpfc_mbox_dev_check(phba);
9069 if (unlikely(rc))
9070 /* Driver clean routine will clean up pending mailbox */
9071 goto out_not_finished;
9072
9073 /* Prepare the mbox command to be posted */
9074 mqe = &mboxq->u.mqe;
9075 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9076
9077 /* Start timer for the mbox_tmo and log some mailbox post messages */
9078 mod_timer(&psli->mbox_tmo, (jiffies +
9079 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9080
9081 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9082 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9083 "x%x x%x\n",
9084 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9085 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9086 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9087 phba->pport->port_state, psli->sli_flag);
9088
9089 if (mbx_cmnd != MBX_HEARTBEAT) {
9090 if (mboxq->vport) {
9091 lpfc_debugfs_disc_trc(mboxq->vport,
9092 LPFC_DISC_TRC_MBOX_VPORT,
9093 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9094 mbx_cmnd, mqe->un.mb_words[0],
9095 mqe->un.mb_words[1]);
9096 } else {
9097 lpfc_debugfs_disc_trc(phba->pport,
9098 LPFC_DISC_TRC_MBOX,
9099 "MBOX Send: cmd:x%x mb:x%x x%x",
9100 mbx_cmnd, mqe->un.mb_words[0],
9101 mqe->un.mb_words[1]);
9102 }
9103 }
9104 psli->slistat.mbox_cmd++;
9105
9106 /* Post the mailbox command to the port */
9107 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9108 if (rc != MBX_SUCCESS) {
9109 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9110 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9111 "cannot issue Data: x%x x%x\n",
9112 mboxq->vport ? mboxq->vport->vpi : 0,
9113 mboxq->u.mb.mbxCommand,
9114 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9115 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9116 psli->sli_flag, MBX_NOWAIT);
9117 goto out_not_finished;
9118 }
9119
9120 return rc;
9121
9122 out_not_finished:
9123 spin_lock_irqsave(&phba->hbalock, iflags);
9124 if (phba->sli.mbox_active) {
9125 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9126 __lpfc_mbox_cmpl_put(phba, mboxq);
9127 /* Release the token */
9128 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9129 phba->sli.mbox_active = NULL;
9130 }
9131 spin_unlock_irqrestore(&phba->hbalock, iflags);
9132
9133 return MBX_NOT_FINISHED;
9134 }
9135
9136 /**
9137 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
9138 * @phba: Pointer to HBA context object.
9139 * @pmbox: Pointer to mailbox object.
9140 * @flag: Flag indicating how the mailbox need to be processed.
9141 *
9142 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
9143 * the API jump table function pointer from the lpfc_hba struct.
9144 *
9145 * Return codes the caller owns the mailbox command after the return of the
9146 * function.
9147 **/
9148 int
lpfc_sli_issue_mbox(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmbox,uint32_t flag)9149 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9150 {
9151 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9152 }
9153
9154 /**
9155 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9156 * @phba: The hba struct for which this call is being executed.
9157 * @dev_grp: The HBA PCI-Device group number.
9158 *
9159 * This routine sets up the mbox interface API function jump table in @phba
9160 * struct.
9161 * Returns: 0 - success, -ENODEV - failure.
9162 **/
9163 int
lpfc_mbox_api_table_setup(struct lpfc_hba * phba,uint8_t dev_grp)9164 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9165 {
9166
9167 switch (dev_grp) {
9168 case LPFC_PCI_DEV_LP:
9169 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9170 phba->lpfc_sli_handle_slow_ring_event =
9171 lpfc_sli_handle_slow_ring_event_s3;
9172 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9173 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9174 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9175 break;
9176 case LPFC_PCI_DEV_OC:
9177 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9178 phba->lpfc_sli_handle_slow_ring_event =
9179 lpfc_sli_handle_slow_ring_event_s4;
9180 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9181 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9182 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9183 break;
9184 default:
9185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9186 "1420 Invalid HBA PCI-device group: 0x%x\n",
9187 dev_grp);
9188 return -ENODEV;
9189 break;
9190 }
9191 return 0;
9192 }
9193
9194 /**
9195 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9196 * @phba: Pointer to HBA context object.
9197 * @pring: Pointer to driver SLI ring object.
9198 * @piocb: Pointer to address of newly added command iocb.
9199 *
9200 * This function is called with hbalock held for SLI3 ports or
9201 * the ring lock held for SLI4 ports to add a command
9202 * iocb to the txq when SLI layer cannot submit the command iocb
9203 * to the ring.
9204 **/
9205 void
__lpfc_sli_ringtx_put(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * piocb)9206 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9207 struct lpfc_iocbq *piocb)
9208 {
9209 if (phba->sli_rev == LPFC_SLI_REV4)
9210 lockdep_assert_held(&pring->ring_lock);
9211 else
9212 lockdep_assert_held(&phba->hbalock);
9213 /* Insert the caller's iocb in the txq tail for later processing. */
9214 list_add_tail(&piocb->list, &pring->txq);
9215 }
9216
9217 /**
9218 * lpfc_sli_next_iocb - Get the next iocb in the txq
9219 * @phba: Pointer to HBA context object.
9220 * @pring: Pointer to driver SLI ring object.
9221 * @piocb: Pointer to address of newly added command iocb.
9222 *
9223 * This function is called with hbalock held before a new
9224 * iocb is submitted to the firmware. This function checks
9225 * txq to flush the iocbs in txq to Firmware before
9226 * submitting new iocbs to the Firmware.
9227 * If there are iocbs in the txq which need to be submitted
9228 * to firmware, lpfc_sli_next_iocb returns the first element
9229 * of the txq after dequeuing it from txq.
9230 * If there is no iocb in the txq then the function will return
9231 * *piocb and *piocb is set to NULL. Caller needs to check
9232 * *piocb to find if there are more commands in the txq.
9233 **/
9234 static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq ** piocb)9235 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9236 struct lpfc_iocbq **piocb)
9237 {
9238 struct lpfc_iocbq * nextiocb;
9239
9240 lockdep_assert_held(&phba->hbalock);
9241
9242 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9243 if (!nextiocb) {
9244 nextiocb = *piocb;
9245 *piocb = NULL;
9246 }
9247
9248 return nextiocb;
9249 }
9250
9251 /**
9252 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9253 * @phba: Pointer to HBA context object.
9254 * @ring_number: SLI ring number to issue iocb on.
9255 * @piocb: Pointer to command iocb.
9256 * @flag: Flag indicating if this command can be put into txq.
9257 *
9258 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9259 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9260 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
9261 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
9262 * this function allows only iocbs for posting buffers. This function finds
9263 * next available slot in the command ring and posts the command to the
9264 * available slot and writes the port attention register to request HBA start
9265 * processing new iocb. If there is no slot available in the ring and
9266 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
9267 * the function returns IOCB_BUSY.
9268 *
9269 * This function is called with hbalock held. The function will return success
9270 * after it successfully submit the iocb to firmware or after adding to the
9271 * txq.
9272 **/
9273 static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,uint32_t flag)9274 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9275 struct lpfc_iocbq *piocb, uint32_t flag)
9276 {
9277 struct lpfc_iocbq *nextiocb;
9278 IOCB_t *iocb;
9279 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9280
9281 lockdep_assert_held(&phba->hbalock);
9282
9283 if (piocb->iocb_cmpl && (!piocb->vport) &&
9284 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9285 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9286 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9287 "1807 IOCB x%x failed. No vport\n",
9288 piocb->iocb.ulpCommand);
9289 dump_stack();
9290 return IOCB_ERROR;
9291 }
9292
9293
9294 /* If the PCI channel is in offline state, do not post iocbs. */
9295 if (unlikely(pci_channel_offline(phba->pcidev)))
9296 return IOCB_ERROR;
9297
9298 /* If HBA has a deferred error attention, fail the iocb. */
9299 if (unlikely(phba->hba_flag & DEFER_ERATT))
9300 return IOCB_ERROR;
9301
9302 /*
9303 * We should never get an IOCB if we are in a < LINK_DOWN state
9304 */
9305 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9306 return IOCB_ERROR;
9307
9308 /*
9309 * Check to see if we are blocking IOCB processing because of a
9310 * outstanding event.
9311 */
9312 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9313 goto iocb_busy;
9314
9315 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9316 /*
9317 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9318 * can be issued if the link is not up.
9319 */
9320 switch (piocb->iocb.ulpCommand) {
9321 case CMD_GEN_REQUEST64_CR:
9322 case CMD_GEN_REQUEST64_CX:
9323 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9324 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9325 FC_RCTL_DD_UNSOL_CMD) ||
9326 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9327 MENLO_TRANSPORT_TYPE))
9328
9329 goto iocb_busy;
9330 break;
9331 case CMD_QUE_RING_BUF_CN:
9332 case CMD_QUE_RING_BUF64_CN:
9333 /*
9334 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9335 * completion, iocb_cmpl MUST be 0.
9336 */
9337 if (piocb->iocb_cmpl)
9338 piocb->iocb_cmpl = NULL;
9339 fallthrough;
9340 case CMD_CREATE_XRI_CR:
9341 case CMD_CLOSE_XRI_CN:
9342 case CMD_CLOSE_XRI_CX:
9343 break;
9344 default:
9345 goto iocb_busy;
9346 }
9347
9348 /*
9349 * For FCP commands, we must be in a state where we can process link
9350 * attention events.
9351 */
9352 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9353 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9354 goto iocb_busy;
9355 }
9356
9357 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9358 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9359 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9360
9361 if (iocb)
9362 lpfc_sli_update_ring(phba, pring);
9363 else
9364 lpfc_sli_update_full_ring(phba, pring);
9365
9366 if (!piocb)
9367 return IOCB_SUCCESS;
9368
9369 goto out_busy;
9370
9371 iocb_busy:
9372 pring->stats.iocb_cmd_delay++;
9373
9374 out_busy:
9375
9376 if (!(flag & SLI_IOCB_RET_IOCB)) {
9377 __lpfc_sli_ringtx_put(phba, pring, piocb);
9378 return IOCB_SUCCESS;
9379 }
9380
9381 return IOCB_BUSY;
9382 }
9383
9384 /**
9385 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9386 * @phba: Pointer to HBA context object.
9387 * @piocbq: Pointer to command iocb.
9388 * @sglq: Pointer to the scatter gather queue object.
9389 *
9390 * This routine converts the bpl or bde that is in the IOCB
9391 * to a sgl list for the sli4 hardware. The physical address
9392 * of the bpl/bde is converted back to a virtual address.
9393 * If the IOCB contains a BPL then the list of BDE's is
9394 * converted to sli4_sge's. If the IOCB contains a single
9395 * BDE then it is converted to a single sli_sge.
9396 * The IOCB is still in cpu endianess so the contents of
9397 * the bpl can be used without byte swapping.
9398 *
9399 * Returns valid XRI = Success, NO_XRI = Failure.
9400 **/
9401 static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba * phba,struct lpfc_iocbq * piocbq,struct lpfc_sglq * sglq)9402 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9403 struct lpfc_sglq *sglq)
9404 {
9405 uint16_t xritag = NO_XRI;
9406 struct ulp_bde64 *bpl = NULL;
9407 struct ulp_bde64 bde;
9408 struct sli4_sge *sgl = NULL;
9409 struct lpfc_dmabuf *dmabuf;
9410 IOCB_t *icmd;
9411 int numBdes = 0;
9412 int i = 0;
9413 uint32_t offset = 0; /* accumulated offset in the sg request list */
9414 int inbound = 0; /* number of sg reply entries inbound from firmware */
9415
9416 if (!piocbq || !sglq)
9417 return xritag;
9418
9419 sgl = (struct sli4_sge *)sglq->sgl;
9420 icmd = &piocbq->iocb;
9421 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9422 return sglq->sli4_xritag;
9423 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9424 numBdes = icmd->un.genreq64.bdl.bdeSize /
9425 sizeof(struct ulp_bde64);
9426 /* The addrHigh and addrLow fields within the IOCB
9427 * have not been byteswapped yet so there is no
9428 * need to swap them back.
9429 */
9430 if (piocbq->context3)
9431 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9432 else
9433 return xritag;
9434
9435 bpl = (struct ulp_bde64 *)dmabuf->virt;
9436 if (!bpl)
9437 return xritag;
9438
9439 for (i = 0; i < numBdes; i++) {
9440 /* Should already be byte swapped. */
9441 sgl->addr_hi = bpl->addrHigh;
9442 sgl->addr_lo = bpl->addrLow;
9443
9444 sgl->word2 = le32_to_cpu(sgl->word2);
9445 if ((i+1) == numBdes)
9446 bf_set(lpfc_sli4_sge_last, sgl, 1);
9447 else
9448 bf_set(lpfc_sli4_sge_last, sgl, 0);
9449 /* swap the size field back to the cpu so we
9450 * can assign it to the sgl.
9451 */
9452 bde.tus.w = le32_to_cpu(bpl->tus.w);
9453 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9454 /* The offsets in the sgl need to be accumulated
9455 * separately for the request and reply lists.
9456 * The request is always first, the reply follows.
9457 */
9458 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9459 /* add up the reply sg entries */
9460 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9461 inbound++;
9462 /* first inbound? reset the offset */
9463 if (inbound == 1)
9464 offset = 0;
9465 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9466 bf_set(lpfc_sli4_sge_type, sgl,
9467 LPFC_SGE_TYPE_DATA);
9468 offset += bde.tus.f.bdeSize;
9469 }
9470 sgl->word2 = cpu_to_le32(sgl->word2);
9471 bpl++;
9472 sgl++;
9473 }
9474 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9475 /* The addrHigh and addrLow fields of the BDE have not
9476 * been byteswapped yet so they need to be swapped
9477 * before putting them in the sgl.
9478 */
9479 sgl->addr_hi =
9480 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9481 sgl->addr_lo =
9482 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9483 sgl->word2 = le32_to_cpu(sgl->word2);
9484 bf_set(lpfc_sli4_sge_last, sgl, 1);
9485 sgl->word2 = cpu_to_le32(sgl->word2);
9486 sgl->sge_len =
9487 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9488 }
9489 return sglq->sli4_xritag;
9490 }
9491
9492 /**
9493 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9494 * @phba: Pointer to HBA context object.
9495 * @iocbq: Pointer to command iocb.
9496 * @wqe: Pointer to the work queue entry.
9497 *
9498 * This routine converts the iocb command to its Work Queue Entry
9499 * equivalent. The wqe pointer should not have any fields set when
9500 * this routine is called because it will memcpy over them.
9501 * This routine does not set the CQ_ID or the WQEC bits in the
9502 * wqe.
9503 *
9504 * Returns: 0 = Success, IOCB_ERROR = Failure.
9505 **/
9506 static int
lpfc_sli4_iocb2wqe(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq,union lpfc_wqe128 * wqe)9507 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9508 union lpfc_wqe128 *wqe)
9509 {
9510 uint32_t xmit_len = 0, total_len = 0;
9511 uint8_t ct = 0;
9512 uint32_t fip;
9513 uint32_t abort_tag;
9514 uint8_t command_type = ELS_COMMAND_NON_FIP;
9515 uint8_t cmnd;
9516 uint16_t xritag;
9517 uint16_t abrt_iotag;
9518 struct lpfc_iocbq *abrtiocbq;
9519 struct ulp_bde64 *bpl = NULL;
9520 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9521 int numBdes, i;
9522 struct ulp_bde64 bde;
9523 struct lpfc_nodelist *ndlp;
9524 uint32_t *pcmd;
9525 uint32_t if_type;
9526
9527 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9528 /* The fcp commands will set command type */
9529 if (iocbq->iocb_flag & LPFC_IO_FCP)
9530 command_type = FCP_COMMAND;
9531 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9532 command_type = ELS_COMMAND_FIP;
9533 else
9534 command_type = ELS_COMMAND_NON_FIP;
9535
9536 if (phba->fcp_embed_io)
9537 memset(wqe, 0, sizeof(union lpfc_wqe128));
9538 /* Some of the fields are in the right position already */
9539 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9540 /* The ct field has moved so reset */
9541 wqe->generic.wqe_com.word7 = 0;
9542 wqe->generic.wqe_com.word10 = 0;
9543
9544 abort_tag = (uint32_t) iocbq->iotag;
9545 xritag = iocbq->sli4_xritag;
9546 /* words0-2 bpl convert bde */
9547 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9548 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9549 sizeof(struct ulp_bde64);
9550 bpl = (struct ulp_bde64 *)
9551 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9552 if (!bpl)
9553 return IOCB_ERROR;
9554
9555 /* Should already be byte swapped. */
9556 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9557 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9558 /* swap the size field back to the cpu so we
9559 * can assign it to the sgl.
9560 */
9561 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9562 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9563 total_len = 0;
9564 for (i = 0; i < numBdes; i++) {
9565 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9566 total_len += bde.tus.f.bdeSize;
9567 }
9568 } else
9569 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9570
9571 iocbq->iocb.ulpIoTag = iocbq->iotag;
9572 cmnd = iocbq->iocb.ulpCommand;
9573
9574 switch (iocbq->iocb.ulpCommand) {
9575 case CMD_ELS_REQUEST64_CR:
9576 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9577 ndlp = iocbq->context_un.ndlp;
9578 else
9579 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9580 if (!iocbq->iocb.ulpLe) {
9581 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9582 "2007 Only Limited Edition cmd Format"
9583 " supported 0x%x\n",
9584 iocbq->iocb.ulpCommand);
9585 return IOCB_ERROR;
9586 }
9587
9588 wqe->els_req.payload_len = xmit_len;
9589 /* Els_reguest64 has a TMO */
9590 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9591 iocbq->iocb.ulpTimeout);
9592 /* Need a VF for word 4 set the vf bit*/
9593 bf_set(els_req64_vf, &wqe->els_req, 0);
9594 /* And a VFID for word 12 */
9595 bf_set(els_req64_vfid, &wqe->els_req, 0);
9596 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9597 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9598 iocbq->iocb.ulpContext);
9599 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9600 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9601 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9602 if (command_type == ELS_COMMAND_FIP)
9603 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9604 >> LPFC_FIP_ELS_ID_SHIFT);
9605 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9606 iocbq->context2)->virt);
9607 if_type = bf_get(lpfc_sli_intf_if_type,
9608 &phba->sli4_hba.sli_intf);
9609 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9610 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9611 *pcmd == ELS_CMD_SCR ||
9612 *pcmd == ELS_CMD_RDF ||
9613 *pcmd == ELS_CMD_RSCN_XMT ||
9614 *pcmd == ELS_CMD_FDISC ||
9615 *pcmd == ELS_CMD_LOGO ||
9616 *pcmd == ELS_CMD_PLOGI)) {
9617 bf_set(els_req64_sp, &wqe->els_req, 1);
9618 bf_set(els_req64_sid, &wqe->els_req,
9619 iocbq->vport->fc_myDID);
9620 if ((*pcmd == ELS_CMD_FLOGI) &&
9621 !(phba->fc_topology ==
9622 LPFC_TOPOLOGY_LOOP))
9623 bf_set(els_req64_sid, &wqe->els_req, 0);
9624 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9625 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9626 phba->vpi_ids[iocbq->vport->vpi]);
9627 } else if (pcmd && iocbq->context1) {
9628 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9629 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9631 }
9632 }
9633 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9634 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9635 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9636 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9637 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9638 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9639 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9640 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9641 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9642 break;
9643 case CMD_XMIT_SEQUENCE64_CX:
9644 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9645 iocbq->iocb.un.ulpWord[3]);
9646 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9647 iocbq->iocb.unsli3.rcvsli3.ox_id);
9648 /* The entire sequence is transmitted for this IOCB */
9649 xmit_len = total_len;
9650 cmnd = CMD_XMIT_SEQUENCE64_CR;
9651 if (phba->link_flag & LS_LOOPBACK_MODE)
9652 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9653 fallthrough;
9654 case CMD_XMIT_SEQUENCE64_CR:
9655 /* word3 iocb=io_tag32 wqe=reserved */
9656 wqe->xmit_sequence.rsvd3 = 0;
9657 /* word4 relative_offset memcpy */
9658 /* word5 r_ctl/df_ctl memcpy */
9659 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9660 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9661 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9662 LPFC_WQE_IOD_WRITE);
9663 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9664 LPFC_WQE_LENLOC_WORD12);
9665 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9666 wqe->xmit_sequence.xmit_len = xmit_len;
9667 command_type = OTHER_COMMAND;
9668 break;
9669 case CMD_XMIT_BCAST64_CN:
9670 /* word3 iocb=iotag32 wqe=seq_payload_len */
9671 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9672 /* word4 iocb=rsvd wqe=rsvd */
9673 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9674 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9675 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9676 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9677 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9678 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9679 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9680 LPFC_WQE_LENLOC_WORD3);
9681 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9682 break;
9683 case CMD_FCP_IWRITE64_CR:
9684 command_type = FCP_COMMAND_DATA_OUT;
9685 /* word3 iocb=iotag wqe=payload_offset_len */
9686 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9687 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9688 xmit_len + sizeof(struct fcp_rsp));
9689 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9690 0);
9691 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9692 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9693 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9694 iocbq->iocb.ulpFCP2Rcvy);
9695 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9696 /* Always open the exchange */
9697 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9698 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9699 LPFC_WQE_LENLOC_WORD4);
9700 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9701 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9702 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9703 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9704 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9705 if (iocbq->priority) {
9706 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9707 (iocbq->priority << 1));
9708 } else {
9709 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9710 (phba->cfg_XLanePriority << 1));
9711 }
9712 }
9713 /* Note, word 10 is already initialized to 0 */
9714
9715 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9716 if (phba->cfg_enable_pbde)
9717 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9718 else
9719 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9720
9721 if (phba->fcp_embed_io) {
9722 struct lpfc_io_buf *lpfc_cmd;
9723 struct sli4_sge *sgl;
9724 struct fcp_cmnd *fcp_cmnd;
9725 uint32_t *ptr;
9726
9727 /* 128 byte wqe support here */
9728
9729 lpfc_cmd = iocbq->context1;
9730 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9731 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9732
9733 /* Word 0-2 - FCP_CMND */
9734 wqe->generic.bde.tus.f.bdeFlags =
9735 BUFF_TYPE_BDE_IMMED;
9736 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9737 wqe->generic.bde.addrHigh = 0;
9738 wqe->generic.bde.addrLow = 88; /* Word 22 */
9739
9740 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9741 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9742
9743 /* Word 22-29 FCP CMND Payload */
9744 ptr = &wqe->words[22];
9745 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9746 }
9747 break;
9748 case CMD_FCP_IREAD64_CR:
9749 /* word3 iocb=iotag wqe=payload_offset_len */
9750 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9751 bf_set(payload_offset_len, &wqe->fcp_iread,
9752 xmit_len + sizeof(struct fcp_rsp));
9753 bf_set(cmd_buff_len, &wqe->fcp_iread,
9754 0);
9755 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9756 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9757 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9758 iocbq->iocb.ulpFCP2Rcvy);
9759 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9760 /* Always open the exchange */
9761 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9762 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9763 LPFC_WQE_LENLOC_WORD4);
9764 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9765 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9766 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9767 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9768 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9769 if (iocbq->priority) {
9770 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9771 (iocbq->priority << 1));
9772 } else {
9773 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9774 (phba->cfg_XLanePriority << 1));
9775 }
9776 }
9777 /* Note, word 10 is already initialized to 0 */
9778
9779 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9780 if (phba->cfg_enable_pbde)
9781 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9782 else
9783 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9784
9785 if (phba->fcp_embed_io) {
9786 struct lpfc_io_buf *lpfc_cmd;
9787 struct sli4_sge *sgl;
9788 struct fcp_cmnd *fcp_cmnd;
9789 uint32_t *ptr;
9790
9791 /* 128 byte wqe support here */
9792
9793 lpfc_cmd = iocbq->context1;
9794 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9795 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9796
9797 /* Word 0-2 - FCP_CMND */
9798 wqe->generic.bde.tus.f.bdeFlags =
9799 BUFF_TYPE_BDE_IMMED;
9800 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9801 wqe->generic.bde.addrHigh = 0;
9802 wqe->generic.bde.addrLow = 88; /* Word 22 */
9803
9804 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9805 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9806
9807 /* Word 22-29 FCP CMND Payload */
9808 ptr = &wqe->words[22];
9809 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9810 }
9811 break;
9812 case CMD_FCP_ICMND64_CR:
9813 /* word3 iocb=iotag wqe=payload_offset_len */
9814 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9815 bf_set(payload_offset_len, &wqe->fcp_icmd,
9816 xmit_len + sizeof(struct fcp_rsp));
9817 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9818 0);
9819 /* word3 iocb=IO_TAG wqe=reserved */
9820 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9821 /* Always open the exchange */
9822 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9823 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9824 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9825 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9826 LPFC_WQE_LENLOC_NONE);
9827 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9828 iocbq->iocb.ulpFCP2Rcvy);
9829 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9830 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9831 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9832 if (iocbq->priority) {
9833 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9834 (iocbq->priority << 1));
9835 } else {
9836 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9837 (phba->cfg_XLanePriority << 1));
9838 }
9839 }
9840 /* Note, word 10 is already initialized to 0 */
9841
9842 if (phba->fcp_embed_io) {
9843 struct lpfc_io_buf *lpfc_cmd;
9844 struct sli4_sge *sgl;
9845 struct fcp_cmnd *fcp_cmnd;
9846 uint32_t *ptr;
9847
9848 /* 128 byte wqe support here */
9849
9850 lpfc_cmd = iocbq->context1;
9851 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9852 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9853
9854 /* Word 0-2 - FCP_CMND */
9855 wqe->generic.bde.tus.f.bdeFlags =
9856 BUFF_TYPE_BDE_IMMED;
9857 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9858 wqe->generic.bde.addrHigh = 0;
9859 wqe->generic.bde.addrLow = 88; /* Word 22 */
9860
9861 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9862 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9863
9864 /* Word 22-29 FCP CMND Payload */
9865 ptr = &wqe->words[22];
9866 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9867 }
9868 break;
9869 case CMD_GEN_REQUEST64_CR:
9870 /* For this command calculate the xmit length of the
9871 * request bde.
9872 */
9873 xmit_len = 0;
9874 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9875 sizeof(struct ulp_bde64);
9876 for (i = 0; i < numBdes; i++) {
9877 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9878 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9879 break;
9880 xmit_len += bde.tus.f.bdeSize;
9881 }
9882 /* word3 iocb=IO_TAG wqe=request_payload_len */
9883 wqe->gen_req.request_payload_len = xmit_len;
9884 /* word4 iocb=parameter wqe=relative_offset memcpy */
9885 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9886 /* word6 context tag copied in memcpy */
9887 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9888 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9889 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9890 "2015 Invalid CT %x command 0x%x\n",
9891 ct, iocbq->iocb.ulpCommand);
9892 return IOCB_ERROR;
9893 }
9894 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9895 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9896 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9897 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9898 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9899 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9900 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9901 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9902 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9903 command_type = OTHER_COMMAND;
9904 break;
9905 case CMD_XMIT_ELS_RSP64_CX:
9906 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9907 /* words0-2 BDE memcpy */
9908 /* word3 iocb=iotag32 wqe=response_payload_len */
9909 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9910 /* word4 */
9911 wqe->xmit_els_rsp.word4 = 0;
9912 /* word5 iocb=rsvd wge=did */
9913 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9914 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9915
9916 if_type = bf_get(lpfc_sli_intf_if_type,
9917 &phba->sli4_hba.sli_intf);
9918 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9919 if (iocbq->vport->fc_flag & FC_PT2PT) {
9920 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9921 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9922 iocbq->vport->fc_myDID);
9923 if (iocbq->vport->fc_myDID == Fabric_DID) {
9924 bf_set(wqe_els_did,
9925 &wqe->xmit_els_rsp.wqe_dest, 0);
9926 }
9927 }
9928 }
9929 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9930 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9931 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9932 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9933 iocbq->iocb.unsli3.rcvsli3.ox_id);
9934 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9935 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9936 phba->vpi_ids[iocbq->vport->vpi]);
9937 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9938 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9939 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9940 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9941 LPFC_WQE_LENLOC_WORD3);
9942 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9943 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9944 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9945 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9946 iocbq->context2)->virt);
9947 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9948 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9949 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9950 iocbq->vport->fc_myDID);
9951 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9952 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9953 phba->vpi_ids[phba->pport->vpi]);
9954 }
9955 command_type = OTHER_COMMAND;
9956 break;
9957 case CMD_CLOSE_XRI_CN:
9958 case CMD_ABORT_XRI_CN:
9959 case CMD_ABORT_XRI_CX:
9960 /* words 0-2 memcpy should be 0 rserved */
9961 /* port will send abts */
9962 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9963 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9964 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9965 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9966 } else
9967 fip = 0;
9968
9969 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9970 /*
9971 * The link is down, or the command was ELS_FIP
9972 * so the fw does not need to send abts
9973 * on the wire.
9974 */
9975 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9976 else
9977 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9978 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9979 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9980 wqe->abort_cmd.rsrvd5 = 0;
9981 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9982 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9983 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9984 /*
9985 * The abort handler will send us CMD_ABORT_XRI_CN or
9986 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9987 */
9988 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9989 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9990 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9991 LPFC_WQE_LENLOC_NONE);
9992 cmnd = CMD_ABORT_XRI_CX;
9993 command_type = OTHER_COMMAND;
9994 xritag = 0;
9995 break;
9996 case CMD_XMIT_BLS_RSP64_CX:
9997 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9998 /* As BLS ABTS RSP WQE is very different from other WQEs,
9999 * we re-construct this WQE here based on information in
10000 * iocbq from scratch.
10001 */
10002 memset(wqe, 0, sizeof(*wqe));
10003 /* OX_ID is invariable to who sent ABTS to CT exchange */
10004 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10005 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10006 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10007 LPFC_ABTS_UNSOL_INT) {
10008 /* ABTS sent by initiator to CT exchange, the
10009 * RX_ID field will be filled with the newly
10010 * allocated responder XRI.
10011 */
10012 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10013 iocbq->sli4_xritag);
10014 } else {
10015 /* ABTS sent by responder to CT exchange, the
10016 * RX_ID field will be filled with the responder
10017 * RX_ID from ABTS.
10018 */
10019 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10020 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10021 }
10022 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10023 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10024
10025 /* Use CT=VPI */
10026 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10027 ndlp->nlp_DID);
10028 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10029 iocbq->iocb.ulpContext);
10030 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10031 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10032 phba->vpi_ids[phba->pport->vpi]);
10033 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10034 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10035 LPFC_WQE_LENLOC_NONE);
10036 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
10037 command_type = OTHER_COMMAND;
10038 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10039 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10040 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10041 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10042 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10043 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10044 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10045 }
10046
10047 break;
10048 case CMD_SEND_FRAME:
10049 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10050 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
10051 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
10052 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10053 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10054 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10055 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10056 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10057 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10058 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10059 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10060 return 0;
10061 case CMD_XRI_ABORTED_CX:
10062 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
10063 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
10064 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
10065 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
10066 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
10067 default:
10068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10069 "2014 Invalid command 0x%x\n",
10070 iocbq->iocb.ulpCommand);
10071 return IOCB_ERROR;
10072 break;
10073 }
10074
10075 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10076 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10077 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10078 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10079 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10080 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10081 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10082 LPFC_IO_DIF_INSERT);
10083 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10084 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10085 wqe->generic.wqe_com.abort_tag = abort_tag;
10086 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10087 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10088 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10089 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10090 return 0;
10091 }
10092
10093 /**
10094 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10095 * @phba: Pointer to HBA context object.
10096 * @ring_number: SLI ring number to issue iocb on.
10097 * @piocb: Pointer to command iocb.
10098 * @flag: Flag indicating if this command can be put into txq.
10099 *
10100 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10101 * an iocb command to an HBA with SLI-4 interface spec.
10102 *
10103 * This function is called with ringlock held. The function will return success
10104 * after it successfully submit the iocb to firmware or after adding to the
10105 * txq.
10106 **/
10107 static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,uint32_t flag)10108 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10109 struct lpfc_iocbq *piocb, uint32_t flag)
10110 {
10111 struct lpfc_sglq *sglq;
10112 union lpfc_wqe128 wqe;
10113 struct lpfc_queue *wq;
10114 struct lpfc_sli_ring *pring;
10115
10116 /* Get the WQ */
10117 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10118 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10119 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10120 } else {
10121 wq = phba->sli4_hba.els_wq;
10122 }
10123
10124 /* Get corresponding ring */
10125 pring = wq->pring;
10126
10127 /*
10128 * The WQE can be either 64 or 128 bytes,
10129 */
10130
10131 lockdep_assert_held(&pring->ring_lock);
10132
10133 if (piocb->sli4_xritag == NO_XRI) {
10134 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10135 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10136 sglq = NULL;
10137 else {
10138 if (!list_empty(&pring->txq)) {
10139 if (!(flag & SLI_IOCB_RET_IOCB)) {
10140 __lpfc_sli_ringtx_put(phba,
10141 pring, piocb);
10142 return IOCB_SUCCESS;
10143 } else {
10144 return IOCB_BUSY;
10145 }
10146 } else {
10147 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10148 if (!sglq) {
10149 if (!(flag & SLI_IOCB_RET_IOCB)) {
10150 __lpfc_sli_ringtx_put(phba,
10151 pring,
10152 piocb);
10153 return IOCB_SUCCESS;
10154 } else
10155 return IOCB_BUSY;
10156 }
10157 }
10158 }
10159 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10160 /* These IO's already have an XRI and a mapped sgl. */
10161 sglq = NULL;
10162 else {
10163 /*
10164 * This is a continuation of a commandi,(CX) so this
10165 * sglq is on the active list
10166 */
10167 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10168 if (!sglq)
10169 return IOCB_ERROR;
10170 }
10171
10172 if (sglq) {
10173 piocb->sli4_lxritag = sglq->sli4_lxritag;
10174 piocb->sli4_xritag = sglq->sli4_xritag;
10175 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10176 return IOCB_ERROR;
10177 }
10178
10179 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10180 return IOCB_ERROR;
10181
10182 if (lpfc_sli4_wq_put(wq, &wqe))
10183 return IOCB_ERROR;
10184 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10185
10186 return 0;
10187 }
10188
10189 /*
10190 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10191 *
10192 * This routine wraps the actual lockless version for issusing IOCB function
10193 * pointer from the lpfc_hba struct.
10194 *
10195 * Return codes:
10196 * IOCB_ERROR - Error
10197 * IOCB_SUCCESS - Success
10198 * IOCB_BUSY - Busy
10199 **/
10200 int
__lpfc_sli_issue_iocb(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,uint32_t flag)10201 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10202 struct lpfc_iocbq *piocb, uint32_t flag)
10203 {
10204 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10205 }
10206
10207 /**
10208 * lpfc_sli_api_table_setup - Set up sli api function jump table
10209 * @phba: The hba struct for which this call is being executed.
10210 * @dev_grp: The HBA PCI-Device group number.
10211 *
10212 * This routine sets up the SLI interface API function jump table in @phba
10213 * struct.
10214 * Returns: 0 - success, -ENODEV - failure.
10215 **/
10216 int
lpfc_sli_api_table_setup(struct lpfc_hba * phba,uint8_t dev_grp)10217 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10218 {
10219
10220 switch (dev_grp) {
10221 case LPFC_PCI_DEV_LP:
10222 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10223 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10224 break;
10225 case LPFC_PCI_DEV_OC:
10226 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10227 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10228 break;
10229 default:
10230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10231 "1419 Invalid HBA PCI-device group: 0x%x\n",
10232 dev_grp);
10233 return -ENODEV;
10234 break;
10235 }
10236 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10237 return 0;
10238 }
10239
10240 /**
10241 * lpfc_sli4_calc_ring - Calculates which ring to use
10242 * @phba: Pointer to HBA context object.
10243 * @piocb: Pointer to command iocb.
10244 *
10245 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on
10246 * hba_wqidx, thus we need to calculate the corresponding ring.
10247 * Since ABORTS must go on the same WQ of the command they are
10248 * aborting, we use command's hba_wqidx.
10249 */
10250 struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba * phba,struct lpfc_iocbq * piocb)10251 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10252 {
10253 struct lpfc_io_buf *lpfc_cmd;
10254
10255 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10256 if (unlikely(!phba->sli4_hba.hdwq))
10257 return NULL;
10258 /*
10259 * for abort iocb hba_wqidx should already
10260 * be setup based on what work queue we used.
10261 */
10262 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10263 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10264 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10265 }
10266 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10267 } else {
10268 if (unlikely(!phba->sli4_hba.els_wq))
10269 return NULL;
10270 piocb->hba_wqidx = 0;
10271 return phba->sli4_hba.els_wq->pring;
10272 }
10273 }
10274
10275 /**
10276 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10277 * @phba: Pointer to HBA context object.
10278 * @ring_number: Ring number
10279 * @piocb: Pointer to command iocb.
10280 * @flag: Flag indicating if this command can be put into txq.
10281 *
10282 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
10283 * function. This function gets the hbalock and calls
10284 * __lpfc_sli_issue_iocb function and will return the error returned
10285 * by __lpfc_sli_issue_iocb function. This wrapper is used by
10286 * functions which do not hold hbalock.
10287 **/
10288 int
lpfc_sli_issue_iocb(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,uint32_t flag)10289 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10290 struct lpfc_iocbq *piocb, uint32_t flag)
10291 {
10292 struct lpfc_sli_ring *pring;
10293 struct lpfc_queue *eq;
10294 unsigned long iflags;
10295 int rc;
10296
10297 if (phba->sli_rev == LPFC_SLI_REV4) {
10298 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10299
10300 pring = lpfc_sli4_calc_ring(phba, piocb);
10301 if (unlikely(pring == NULL))
10302 return IOCB_ERROR;
10303
10304 spin_lock_irqsave(&pring->ring_lock, iflags);
10305 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10306 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10307
10308 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10309 } else {
10310 /* For now, SLI2/3 will still use hbalock */
10311 spin_lock_irqsave(&phba->hbalock, iflags);
10312 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10313 spin_unlock_irqrestore(&phba->hbalock, iflags);
10314 }
10315 return rc;
10316 }
10317
10318 /**
10319 * lpfc_extra_ring_setup - Extra ring setup function
10320 * @phba: Pointer to HBA context object.
10321 *
10322 * This function is called while driver attaches with the
10323 * HBA to setup the extra ring. The extra ring is used
10324 * only when driver needs to support target mode functionality
10325 * or IP over FC functionalities.
10326 *
10327 * This function is called with no lock held. SLI3 only.
10328 **/
10329 static int
lpfc_extra_ring_setup(struct lpfc_hba * phba)10330 lpfc_extra_ring_setup( struct lpfc_hba *phba)
10331 {
10332 struct lpfc_sli *psli;
10333 struct lpfc_sli_ring *pring;
10334
10335 psli = &phba->sli;
10336
10337 /* Adjust cmd/rsp ring iocb entries more evenly */
10338
10339 /* Take some away from the FCP ring */
10340 pring = &psli->sli3_ring[LPFC_FCP_RING];
10341 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10342 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10343 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10344 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10345
10346 /* and give them to the extra ring */
10347 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10348
10349 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10350 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10351 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10352 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10353
10354 /* Setup default profile for this ring */
10355 pring->iotag_max = 4096;
10356 pring->num_mask = 1;
10357 pring->prt[0].profile = 0; /* Mask 0 */
10358 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10359 pring->prt[0].type = phba->cfg_multi_ring_type;
10360 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10361 return 0;
10362 }
10363
10364 static void
lpfc_sli_post_recovery_event(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp)10365 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10366 struct lpfc_nodelist *ndlp)
10367 {
10368 unsigned long iflags;
10369 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10370
10371 spin_lock_irqsave(&phba->hbalock, iflags);
10372 if (!list_empty(&evtp->evt_listp)) {
10373 spin_unlock_irqrestore(&phba->hbalock, iflags);
10374 return;
10375 }
10376
10377 /* Incrementing the reference count until the queued work is done. */
10378 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10379 if (!evtp->evt_arg1) {
10380 spin_unlock_irqrestore(&phba->hbalock, iflags);
10381 return;
10382 }
10383 evtp->evt = LPFC_EVT_RECOVER_PORT;
10384 list_add_tail(&evtp->evt_listp, &phba->work_list);
10385 spin_unlock_irqrestore(&phba->hbalock, iflags);
10386
10387 lpfc_worker_wake_up(phba);
10388 }
10389
10390 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10391 * @phba: Pointer to HBA context object.
10392 * @iocbq: Pointer to iocb object.
10393 *
10394 * The async_event handler calls this routine when it receives
10395 * an ASYNC_STATUS_CN event from the port. The port generates
10396 * this event when an Abort Sequence request to an rport fails
10397 * twice in succession. The abort could be originated by the
10398 * driver or by the port. The ABTS could have been for an ELS
10399 * or FCP IO. The port only generates this event when an ABTS
10400 * fails to complete after one retry.
10401 */
10402 static void
lpfc_sli_abts_err_handler(struct lpfc_hba * phba,struct lpfc_iocbq * iocbq)10403 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10404 struct lpfc_iocbq *iocbq)
10405 {
10406 struct lpfc_nodelist *ndlp = NULL;
10407 uint16_t rpi = 0, vpi = 0;
10408 struct lpfc_vport *vport = NULL;
10409
10410 /* The rpi in the ulpContext is vport-sensitive. */
10411 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10412 rpi = iocbq->iocb.ulpContext;
10413
10414 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10415 "3092 Port generated ABTS async event "
10416 "on vpi %d rpi %d status 0x%x\n",
10417 vpi, rpi, iocbq->iocb.ulpStatus);
10418
10419 vport = lpfc_find_vport_by_vpid(phba, vpi);
10420 if (!vport)
10421 goto err_exit;
10422 ndlp = lpfc_findnode_rpi(vport, rpi);
10423 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10424 goto err_exit;
10425
10426 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10427 lpfc_sli_abts_recover_port(vport, ndlp);
10428 return;
10429
10430 err_exit:
10431 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10432 "3095 Event Context not found, no "
10433 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10434 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10435 vpi, rpi);
10436 }
10437
10438 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10439 * @phba: pointer to HBA context object.
10440 * @ndlp: nodelist pointer for the impacted rport.
10441 * @axri: pointer to the wcqe containing the failed exchange.
10442 *
10443 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10444 * port. The port generates this event when an abort exchange request to an
10445 * rport fails twice in succession with no reply. The abort could be originated
10446 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10447 */
10448 void
lpfc_sli4_abts_err_handler(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,struct sli4_wcqe_xri_aborted * axri)10449 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10450 struct lpfc_nodelist *ndlp,
10451 struct sli4_wcqe_xri_aborted *axri)
10452 {
10453 uint32_t ext_status = 0;
10454
10455 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10456 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10457 "3115 Node Context not found, driver "
10458 "ignoring abts err event\n");
10459 return;
10460 }
10461
10462 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10463 "3116 Port generated FCP XRI ABORT event on "
10464 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10465 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10466 bf_get(lpfc_wcqe_xa_xri, axri),
10467 bf_get(lpfc_wcqe_xa_status, axri),
10468 axri->parameter);
10469
10470 /*
10471 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10472 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10473 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10474 */
10475 ext_status = axri->parameter & IOERR_PARAM_MASK;
10476 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10477 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10478 lpfc_sli_post_recovery_event(phba, ndlp);
10479 }
10480
10481 /**
10482 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10483 * @phba: Pointer to HBA context object.
10484 * @pring: Pointer to driver SLI ring object.
10485 * @iocbq: Pointer to iocb object.
10486 *
10487 * This function is called by the slow ring event handler
10488 * function when there is an ASYNC event iocb in the ring.
10489 * This function is called with no lock held.
10490 * Currently this function handles only temperature related
10491 * ASYNC events. The function decodes the temperature sensor
10492 * event message and posts events for the management applications.
10493 **/
10494 static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * iocbq)10495 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10496 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10497 {
10498 IOCB_t *icmd;
10499 uint16_t evt_code;
10500 struct temp_event temp_event_data;
10501 struct Scsi_Host *shost;
10502 uint32_t *iocb_w;
10503
10504 icmd = &iocbq->iocb;
10505 evt_code = icmd->un.asyncstat.evt_code;
10506
10507 switch (evt_code) {
10508 case ASYNC_TEMP_WARN:
10509 case ASYNC_TEMP_SAFE:
10510 temp_event_data.data = (uint32_t) icmd->ulpContext;
10511 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10512 if (evt_code == ASYNC_TEMP_WARN) {
10513 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10514 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10515 "0347 Adapter is very hot, please take "
10516 "corrective action. temperature : %d Celsius\n",
10517 (uint32_t) icmd->ulpContext);
10518 } else {
10519 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10521 "0340 Adapter temperature is OK now. "
10522 "temperature : %d Celsius\n",
10523 (uint32_t) icmd->ulpContext);
10524 }
10525
10526 /* Send temperature change event to applications */
10527 shost = lpfc_shost_from_vport(phba->pport);
10528 fc_host_post_vendor_event(shost, fc_get_event_number(),
10529 sizeof(temp_event_data), (char *) &temp_event_data,
10530 LPFC_NL_VENDOR_ID);
10531 break;
10532 case ASYNC_STATUS_CN:
10533 lpfc_sli_abts_err_handler(phba, iocbq);
10534 break;
10535 default:
10536 iocb_w = (uint32_t *) icmd;
10537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10538 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10539 " evt_code 0x%x\n"
10540 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10541 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10542 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10543 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10544 pring->ringno, icmd->un.asyncstat.evt_code,
10545 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10546 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10547 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10548 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10549
10550 break;
10551 }
10552 }
10553
10554
10555 /**
10556 * lpfc_sli4_setup - SLI ring setup function
10557 * @phba: Pointer to HBA context object.
10558 *
10559 * lpfc_sli_setup sets up rings of the SLI interface with
10560 * number of iocbs per ring and iotags. This function is
10561 * called while driver attach to the HBA and before the
10562 * interrupts are enabled. So there is no need for locking.
10563 *
10564 * This function always returns 0.
10565 **/
10566 int
lpfc_sli4_setup(struct lpfc_hba * phba)10567 lpfc_sli4_setup(struct lpfc_hba *phba)
10568 {
10569 struct lpfc_sli_ring *pring;
10570
10571 pring = phba->sli4_hba.els_wq->pring;
10572 pring->num_mask = LPFC_MAX_RING_MASK;
10573 pring->prt[0].profile = 0; /* Mask 0 */
10574 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10575 pring->prt[0].type = FC_TYPE_ELS;
10576 pring->prt[0].lpfc_sli_rcv_unsol_event =
10577 lpfc_els_unsol_event;
10578 pring->prt[1].profile = 0; /* Mask 1 */
10579 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10580 pring->prt[1].type = FC_TYPE_ELS;
10581 pring->prt[1].lpfc_sli_rcv_unsol_event =
10582 lpfc_els_unsol_event;
10583 pring->prt[2].profile = 0; /* Mask 2 */
10584 /* NameServer Inquiry */
10585 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10586 /* NameServer */
10587 pring->prt[2].type = FC_TYPE_CT;
10588 pring->prt[2].lpfc_sli_rcv_unsol_event =
10589 lpfc_ct_unsol_event;
10590 pring->prt[3].profile = 0; /* Mask 3 */
10591 /* NameServer response */
10592 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10593 /* NameServer */
10594 pring->prt[3].type = FC_TYPE_CT;
10595 pring->prt[3].lpfc_sli_rcv_unsol_event =
10596 lpfc_ct_unsol_event;
10597 return 0;
10598 }
10599
10600 /**
10601 * lpfc_sli_setup - SLI ring setup function
10602 * @phba: Pointer to HBA context object.
10603 *
10604 * lpfc_sli_setup sets up rings of the SLI interface with
10605 * number of iocbs per ring and iotags. This function is
10606 * called while driver attach to the HBA and before the
10607 * interrupts are enabled. So there is no need for locking.
10608 *
10609 * This function always returns 0. SLI3 only.
10610 **/
10611 int
lpfc_sli_setup(struct lpfc_hba * phba)10612 lpfc_sli_setup(struct lpfc_hba *phba)
10613 {
10614 int i, totiocbsize = 0;
10615 struct lpfc_sli *psli = &phba->sli;
10616 struct lpfc_sli_ring *pring;
10617
10618 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10619 psli->sli_flag = 0;
10620
10621 psli->iocbq_lookup = NULL;
10622 psli->iocbq_lookup_len = 0;
10623 psli->last_iotag = 0;
10624
10625 for (i = 0; i < psli->num_rings; i++) {
10626 pring = &psli->sli3_ring[i];
10627 switch (i) {
10628 case LPFC_FCP_RING: /* ring 0 - FCP */
10629 /* numCiocb and numRiocb are used in config_port */
10630 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10631 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10632 pring->sli.sli3.numCiocb +=
10633 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10634 pring->sli.sli3.numRiocb +=
10635 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10636 pring->sli.sli3.numCiocb +=
10637 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10638 pring->sli.sli3.numRiocb +=
10639 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10640 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10641 SLI3_IOCB_CMD_SIZE :
10642 SLI2_IOCB_CMD_SIZE;
10643 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10644 SLI3_IOCB_RSP_SIZE :
10645 SLI2_IOCB_RSP_SIZE;
10646 pring->iotag_ctr = 0;
10647 pring->iotag_max =
10648 (phba->cfg_hba_queue_depth * 2);
10649 pring->fast_iotag = pring->iotag_max;
10650 pring->num_mask = 0;
10651 break;
10652 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10653 /* numCiocb and numRiocb are used in config_port */
10654 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10655 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10656 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10657 SLI3_IOCB_CMD_SIZE :
10658 SLI2_IOCB_CMD_SIZE;
10659 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10660 SLI3_IOCB_RSP_SIZE :
10661 SLI2_IOCB_RSP_SIZE;
10662 pring->iotag_max = phba->cfg_hba_queue_depth;
10663 pring->num_mask = 0;
10664 break;
10665 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
10666 /* numCiocb and numRiocb are used in config_port */
10667 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10668 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10669 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10670 SLI3_IOCB_CMD_SIZE :
10671 SLI2_IOCB_CMD_SIZE;
10672 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10673 SLI3_IOCB_RSP_SIZE :
10674 SLI2_IOCB_RSP_SIZE;
10675 pring->fast_iotag = 0;
10676 pring->iotag_ctr = 0;
10677 pring->iotag_max = 4096;
10678 pring->lpfc_sli_rcv_async_status =
10679 lpfc_sli_async_event_handler;
10680 pring->num_mask = LPFC_MAX_RING_MASK;
10681 pring->prt[0].profile = 0; /* Mask 0 */
10682 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10683 pring->prt[0].type = FC_TYPE_ELS;
10684 pring->prt[0].lpfc_sli_rcv_unsol_event =
10685 lpfc_els_unsol_event;
10686 pring->prt[1].profile = 0; /* Mask 1 */
10687 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10688 pring->prt[1].type = FC_TYPE_ELS;
10689 pring->prt[1].lpfc_sli_rcv_unsol_event =
10690 lpfc_els_unsol_event;
10691 pring->prt[2].profile = 0; /* Mask 2 */
10692 /* NameServer Inquiry */
10693 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10694 /* NameServer */
10695 pring->prt[2].type = FC_TYPE_CT;
10696 pring->prt[2].lpfc_sli_rcv_unsol_event =
10697 lpfc_ct_unsol_event;
10698 pring->prt[3].profile = 0; /* Mask 3 */
10699 /* NameServer response */
10700 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10701 /* NameServer */
10702 pring->prt[3].type = FC_TYPE_CT;
10703 pring->prt[3].lpfc_sli_rcv_unsol_event =
10704 lpfc_ct_unsol_event;
10705 break;
10706 }
10707 totiocbsize += (pring->sli.sli3.numCiocb *
10708 pring->sli.sli3.sizeCiocb) +
10709 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10710 }
10711 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10712 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10713 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10714 "SLI2 SLIM Data: x%x x%lx\n",
10715 phba->brd_no, totiocbsize,
10716 (unsigned long) MAX_SLIM_IOCB_SIZE);
10717 }
10718 if (phba->cfg_multi_ring_support == 2)
10719 lpfc_extra_ring_setup(phba);
10720
10721 return 0;
10722 }
10723
10724 /**
10725 * lpfc_sli4_queue_init - Queue initialization function
10726 * @phba: Pointer to HBA context object.
10727 *
10728 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10729 * ring. This function also initializes ring indices of each ring.
10730 * This function is called during the initialization of the SLI
10731 * interface of an HBA.
10732 * This function is called with no lock held and always returns
10733 * 1.
10734 **/
10735 void
lpfc_sli4_queue_init(struct lpfc_hba * phba)10736 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10737 {
10738 struct lpfc_sli *psli;
10739 struct lpfc_sli_ring *pring;
10740 int i;
10741
10742 psli = &phba->sli;
10743 spin_lock_irq(&phba->hbalock);
10744 INIT_LIST_HEAD(&psli->mboxq);
10745 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10746 /* Initialize list headers for txq and txcmplq as double linked lists */
10747 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10748 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10749 pring->flag = 0;
10750 pring->ringno = LPFC_FCP_RING;
10751 pring->txcmplq_cnt = 0;
10752 INIT_LIST_HEAD(&pring->txq);
10753 INIT_LIST_HEAD(&pring->txcmplq);
10754 INIT_LIST_HEAD(&pring->iocb_continueq);
10755 spin_lock_init(&pring->ring_lock);
10756 }
10757 pring = phba->sli4_hba.els_wq->pring;
10758 pring->flag = 0;
10759 pring->ringno = LPFC_ELS_RING;
10760 pring->txcmplq_cnt = 0;
10761 INIT_LIST_HEAD(&pring->txq);
10762 INIT_LIST_HEAD(&pring->txcmplq);
10763 INIT_LIST_HEAD(&pring->iocb_continueq);
10764 spin_lock_init(&pring->ring_lock);
10765
10766 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10767 pring = phba->sli4_hba.nvmels_wq->pring;
10768 pring->flag = 0;
10769 pring->ringno = LPFC_ELS_RING;
10770 pring->txcmplq_cnt = 0;
10771 INIT_LIST_HEAD(&pring->txq);
10772 INIT_LIST_HEAD(&pring->txcmplq);
10773 INIT_LIST_HEAD(&pring->iocb_continueq);
10774 spin_lock_init(&pring->ring_lock);
10775 }
10776
10777 spin_unlock_irq(&phba->hbalock);
10778 }
10779
10780 /**
10781 * lpfc_sli_queue_init - Queue initialization function
10782 * @phba: Pointer to HBA context object.
10783 *
10784 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
10785 * ring. This function also initializes ring indices of each ring.
10786 * This function is called during the initialization of the SLI
10787 * interface of an HBA.
10788 * This function is called with no lock held and always returns
10789 * 1.
10790 **/
10791 void
lpfc_sli_queue_init(struct lpfc_hba * phba)10792 lpfc_sli_queue_init(struct lpfc_hba *phba)
10793 {
10794 struct lpfc_sli *psli;
10795 struct lpfc_sli_ring *pring;
10796 int i;
10797
10798 psli = &phba->sli;
10799 spin_lock_irq(&phba->hbalock);
10800 INIT_LIST_HEAD(&psli->mboxq);
10801 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10802 /* Initialize list headers for txq and txcmplq as double linked lists */
10803 for (i = 0; i < psli->num_rings; i++) {
10804 pring = &psli->sli3_ring[i];
10805 pring->ringno = i;
10806 pring->sli.sli3.next_cmdidx = 0;
10807 pring->sli.sli3.local_getidx = 0;
10808 pring->sli.sli3.cmdidx = 0;
10809 INIT_LIST_HEAD(&pring->iocb_continueq);
10810 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10811 INIT_LIST_HEAD(&pring->postbufq);
10812 pring->flag = 0;
10813 INIT_LIST_HEAD(&pring->txq);
10814 INIT_LIST_HEAD(&pring->txcmplq);
10815 spin_lock_init(&pring->ring_lock);
10816 }
10817 spin_unlock_irq(&phba->hbalock);
10818 }
10819
10820 /**
10821 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10822 * @phba: Pointer to HBA context object.
10823 *
10824 * This routine flushes the mailbox command subsystem. It will unconditionally
10825 * flush all the mailbox commands in the three possible stages in the mailbox
10826 * command sub-system: pending mailbox command queue; the outstanding mailbox
10827 * command; and completed mailbox command queue. It is caller's responsibility
10828 * to make sure that the driver is in the proper state to flush the mailbox
10829 * command sub-system. Namely, the posting of mailbox commands into the
10830 * pending mailbox command queue from the various clients must be stopped;
10831 * either the HBA is in a state that it will never works on the outstanding
10832 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
10833 * mailbox command has been completed.
10834 **/
10835 static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba * phba)10836 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10837 {
10838 LIST_HEAD(completions);
10839 struct lpfc_sli *psli = &phba->sli;
10840 LPFC_MBOXQ_t *pmb;
10841 unsigned long iflag;
10842
10843 /* Disable softirqs, including timers from obtaining phba->hbalock */
10844 local_bh_disable();
10845
10846 /* Flush all the mailbox commands in the mbox system */
10847 spin_lock_irqsave(&phba->hbalock, iflag);
10848
10849 /* The pending mailbox command queue */
10850 list_splice_init(&phba->sli.mboxq, &completions);
10851 /* The outstanding active mailbox command */
10852 if (psli->mbox_active) {
10853 list_add_tail(&psli->mbox_active->list, &completions);
10854 psli->mbox_active = NULL;
10855 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10856 }
10857 /* The completed mailbox command queue */
10858 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10859 spin_unlock_irqrestore(&phba->hbalock, iflag);
10860
10861 /* Enable softirqs again, done with phba->hbalock */
10862 local_bh_enable();
10863
10864 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10865 while (!list_empty(&completions)) {
10866 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10867 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10868 if (pmb->mbox_cmpl)
10869 pmb->mbox_cmpl(phba, pmb);
10870 }
10871 }
10872
10873 /**
10874 * lpfc_sli_host_down - Vport cleanup function
10875 * @vport: Pointer to virtual port object.
10876 *
10877 * lpfc_sli_host_down is called to clean up the resources
10878 * associated with a vport before destroying virtual
10879 * port data structures.
10880 * This function does following operations:
10881 * - Free discovery resources associated with this virtual
10882 * port.
10883 * - Free iocbs associated with this virtual port in
10884 * the txq.
10885 * - Send abort for all iocb commands associated with this
10886 * vport in txcmplq.
10887 *
10888 * This function is called with no lock held and always returns 1.
10889 **/
10890 int
lpfc_sli_host_down(struct lpfc_vport * vport)10891 lpfc_sli_host_down(struct lpfc_vport *vport)
10892 {
10893 LIST_HEAD(completions);
10894 struct lpfc_hba *phba = vport->phba;
10895 struct lpfc_sli *psli = &phba->sli;
10896 struct lpfc_queue *qp = NULL;
10897 struct lpfc_sli_ring *pring;
10898 struct lpfc_iocbq *iocb, *next_iocb;
10899 int i;
10900 unsigned long flags = 0;
10901 uint16_t prev_pring_flag;
10902
10903 lpfc_cleanup_discovery_resources(vport);
10904
10905 spin_lock_irqsave(&phba->hbalock, flags);
10906
10907 /*
10908 * Error everything on the txq since these iocbs
10909 * have not been given to the FW yet.
10910 * Also issue ABTS for everything on the txcmplq
10911 */
10912 if (phba->sli_rev != LPFC_SLI_REV4) {
10913 for (i = 0; i < psli->num_rings; i++) {
10914 pring = &psli->sli3_ring[i];
10915 prev_pring_flag = pring->flag;
10916 /* Only slow rings */
10917 if (pring->ringno == LPFC_ELS_RING) {
10918 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10919 /* Set the lpfc data pending flag */
10920 set_bit(LPFC_DATA_READY, &phba->data_flags);
10921 }
10922 list_for_each_entry_safe(iocb, next_iocb,
10923 &pring->txq, list) {
10924 if (iocb->vport != vport)
10925 continue;
10926 list_move_tail(&iocb->list, &completions);
10927 }
10928 list_for_each_entry_safe(iocb, next_iocb,
10929 &pring->txcmplq, list) {
10930 if (iocb->vport != vport)
10931 continue;
10932 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10933 }
10934 pring->flag = prev_pring_flag;
10935 }
10936 } else {
10937 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10938 pring = qp->pring;
10939 if (!pring)
10940 continue;
10941 if (pring == phba->sli4_hba.els_wq->pring) {
10942 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10943 /* Set the lpfc data pending flag */
10944 set_bit(LPFC_DATA_READY, &phba->data_flags);
10945 }
10946 prev_pring_flag = pring->flag;
10947 spin_lock(&pring->ring_lock);
10948 list_for_each_entry_safe(iocb, next_iocb,
10949 &pring->txq, list) {
10950 if (iocb->vport != vport)
10951 continue;
10952 list_move_tail(&iocb->list, &completions);
10953 }
10954 spin_unlock(&pring->ring_lock);
10955 list_for_each_entry_safe(iocb, next_iocb,
10956 &pring->txcmplq, list) {
10957 if (iocb->vport != vport)
10958 continue;
10959 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10960 }
10961 pring->flag = prev_pring_flag;
10962 }
10963 }
10964 spin_unlock_irqrestore(&phba->hbalock, flags);
10965
10966 /* Cancel all the IOCBs from the completions list */
10967 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10968 IOERR_SLI_DOWN);
10969 return 1;
10970 }
10971
10972 /**
10973 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10974 * @phba: Pointer to HBA context object.
10975 *
10976 * This function cleans up all iocb, buffers, mailbox commands
10977 * while shutting down the HBA. This function is called with no
10978 * lock held and always returns 1.
10979 * This function does the following to cleanup driver resources:
10980 * - Free discovery resources for each virtual port
10981 * - Cleanup any pending fabric iocbs
10982 * - Iterate through the iocb txq and free each entry
10983 * in the list.
10984 * - Free up any buffer posted to the HBA
10985 * - Free mailbox commands in the mailbox queue.
10986 **/
10987 int
lpfc_sli_hba_down(struct lpfc_hba * phba)10988 lpfc_sli_hba_down(struct lpfc_hba *phba)
10989 {
10990 LIST_HEAD(completions);
10991 struct lpfc_sli *psli = &phba->sli;
10992 struct lpfc_queue *qp = NULL;
10993 struct lpfc_sli_ring *pring;
10994 struct lpfc_dmabuf *buf_ptr;
10995 unsigned long flags = 0;
10996 int i;
10997
10998 /* Shutdown the mailbox command sub-system */
10999 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11000
11001 lpfc_hba_down_prep(phba);
11002
11003 /* Disable softirqs, including timers from obtaining phba->hbalock */
11004 local_bh_disable();
11005
11006 lpfc_fabric_abort_hba(phba);
11007
11008 spin_lock_irqsave(&phba->hbalock, flags);
11009
11010 /*
11011 * Error everything on the txq since these iocbs
11012 * have not been given to the FW yet.
11013 */
11014 if (phba->sli_rev != LPFC_SLI_REV4) {
11015 for (i = 0; i < psli->num_rings; i++) {
11016 pring = &psli->sli3_ring[i];
11017 /* Only slow rings */
11018 if (pring->ringno == LPFC_ELS_RING) {
11019 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11020 /* Set the lpfc data pending flag */
11021 set_bit(LPFC_DATA_READY, &phba->data_flags);
11022 }
11023 list_splice_init(&pring->txq, &completions);
11024 }
11025 } else {
11026 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11027 pring = qp->pring;
11028 if (!pring)
11029 continue;
11030 spin_lock(&pring->ring_lock);
11031 list_splice_init(&pring->txq, &completions);
11032 spin_unlock(&pring->ring_lock);
11033 if (pring == phba->sli4_hba.els_wq->pring) {
11034 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11035 /* Set the lpfc data pending flag */
11036 set_bit(LPFC_DATA_READY, &phba->data_flags);
11037 }
11038 }
11039 }
11040 spin_unlock_irqrestore(&phba->hbalock, flags);
11041
11042 /* Cancel all the IOCBs from the completions list */
11043 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11044 IOERR_SLI_DOWN);
11045
11046 spin_lock_irqsave(&phba->hbalock, flags);
11047 list_splice_init(&phba->elsbuf, &completions);
11048 phba->elsbuf_cnt = 0;
11049 phba->elsbuf_prev_cnt = 0;
11050 spin_unlock_irqrestore(&phba->hbalock, flags);
11051
11052 while (!list_empty(&completions)) {
11053 list_remove_head(&completions, buf_ptr,
11054 struct lpfc_dmabuf, list);
11055 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11056 kfree(buf_ptr);
11057 }
11058
11059 /* Enable softirqs again, done with phba->hbalock */
11060 local_bh_enable();
11061
11062 /* Return any active mbox cmds */
11063 del_timer_sync(&psli->mbox_tmo);
11064
11065 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11066 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11067 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11068
11069 return 1;
11070 }
11071
11072 /**
11073 * lpfc_sli_pcimem_bcopy - SLI memory copy function
11074 * @srcp: Source memory pointer.
11075 * @destp: Destination memory pointer.
11076 * @cnt: Number of words required to be copied.
11077 *
11078 * This function is used for copying data between driver memory
11079 * and the SLI memory. This function also changes the endianness
11080 * of each word if native endianness is different from SLI
11081 * endianness. This function can be called with or without
11082 * lock.
11083 **/
11084 void
lpfc_sli_pcimem_bcopy(void * srcp,void * destp,uint32_t cnt)11085 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11086 {
11087 uint32_t *src = srcp;
11088 uint32_t *dest = destp;
11089 uint32_t ldata;
11090 int i;
11091
11092 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11093 ldata = *src;
11094 ldata = le32_to_cpu(ldata);
11095 *dest = ldata;
11096 src++;
11097 dest++;
11098 }
11099 }
11100
11101
11102 /**
11103 * lpfc_sli_bemem_bcopy - SLI memory copy function
11104 * @srcp: Source memory pointer.
11105 * @destp: Destination memory pointer.
11106 * @cnt: Number of words required to be copied.
11107 *
11108 * This function is used for copying data between a data structure
11109 * with big endian representation to local endianness.
11110 * This function can be called with or without lock.
11111 **/
11112 void
lpfc_sli_bemem_bcopy(void * srcp,void * destp,uint32_t cnt)11113 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11114 {
11115 uint32_t *src = srcp;
11116 uint32_t *dest = destp;
11117 uint32_t ldata;
11118 int i;
11119
11120 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11121 ldata = *src;
11122 ldata = be32_to_cpu(ldata);
11123 *dest = ldata;
11124 src++;
11125 dest++;
11126 }
11127 }
11128
11129 /**
11130 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
11131 * @phba: Pointer to HBA context object.
11132 * @pring: Pointer to driver SLI ring object.
11133 * @mp: Pointer to driver buffer object.
11134 *
11135 * This function is called with no lock held.
11136 * It always return zero after adding the buffer to the postbufq
11137 * buffer list.
11138 **/
11139 int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_dmabuf * mp)11140 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11141 struct lpfc_dmabuf *mp)
11142 {
11143 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
11144 later */
11145 spin_lock_irq(&phba->hbalock);
11146 list_add_tail(&mp->list, &pring->postbufq);
11147 pring->postbufq_cnt++;
11148 spin_unlock_irq(&phba->hbalock);
11149 return 0;
11150 }
11151
11152 /**
11153 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
11154 * @phba: Pointer to HBA context object.
11155 *
11156 * When HBQ is enabled, buffers are searched based on tags. This function
11157 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
11158 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
11159 * does not conflict with tags of buffer posted for unsolicited events.
11160 * The function returns the allocated tag. The function is called with
11161 * no locks held.
11162 **/
11163 uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba * phba)11164 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11165 {
11166 spin_lock_irq(&phba->hbalock);
11167 phba->buffer_tag_count++;
11168 /*
11169 * Always set the QUE_BUFTAG_BIT to distiguish between
11170 * a tag assigned by HBQ.
11171 */
11172 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11173 spin_unlock_irq(&phba->hbalock);
11174 return phba->buffer_tag_count;
11175 }
11176
11177 /**
11178 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11179 * @phba: Pointer to HBA context object.
11180 * @pring: Pointer to driver SLI ring object.
11181 * @tag: Buffer tag.
11182 *
11183 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
11184 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
11185 * iocb is posted to the response ring with the tag of the buffer.
11186 * This function searches the pring->postbufq list using the tag
11187 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
11188 * iocb. If the buffer is found then lpfc_dmabuf object of the
11189 * buffer is returned to the caller else NULL is returned.
11190 * This function is called with no lock held.
11191 **/
11192 struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,uint32_t tag)11193 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11194 uint32_t tag)
11195 {
11196 struct lpfc_dmabuf *mp, *next_mp;
11197 struct list_head *slp = &pring->postbufq;
11198
11199 /* Search postbufq, from the beginning, looking for a match on tag */
11200 spin_lock_irq(&phba->hbalock);
11201 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11202 if (mp->buffer_tag == tag) {
11203 list_del_init(&mp->list);
11204 pring->postbufq_cnt--;
11205 spin_unlock_irq(&phba->hbalock);
11206 return mp;
11207 }
11208 }
11209
11210 spin_unlock_irq(&phba->hbalock);
11211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11212 "0402 Cannot find virtual addr for buffer tag on "
11213 "ring %d Data x%lx x%px x%px x%x\n",
11214 pring->ringno, (unsigned long) tag,
11215 slp->next, slp->prev, pring->postbufq_cnt);
11216
11217 return NULL;
11218 }
11219
11220 /**
11221 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
11222 * @phba: Pointer to HBA context object.
11223 * @pring: Pointer to driver SLI ring object.
11224 * @phys: DMA address of the buffer.
11225 *
11226 * This function searches the buffer list using the dma_address
11227 * of unsolicited event to find the driver's lpfc_dmabuf object
11228 * corresponding to the dma_address. The function returns the
11229 * lpfc_dmabuf object if a buffer is found else it returns NULL.
11230 * This function is called by the ct and els unsolicited event
11231 * handlers to get the buffer associated with the unsolicited
11232 * event.
11233 *
11234 * This function is called with no lock held.
11235 **/
11236 struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,dma_addr_t phys)11237 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11238 dma_addr_t phys)
11239 {
11240 struct lpfc_dmabuf *mp, *next_mp;
11241 struct list_head *slp = &pring->postbufq;
11242
11243 /* Search postbufq, from the beginning, looking for a match on phys */
11244 spin_lock_irq(&phba->hbalock);
11245 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11246 if (mp->phys == phys) {
11247 list_del_init(&mp->list);
11248 pring->postbufq_cnt--;
11249 spin_unlock_irq(&phba->hbalock);
11250 return mp;
11251 }
11252 }
11253
11254 spin_unlock_irq(&phba->hbalock);
11255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11256 "0410 Cannot find virtual addr for mapped buf on "
11257 "ring %d Data x%llx x%px x%px x%x\n",
11258 pring->ringno, (unsigned long long)phys,
11259 slp->next, slp->prev, pring->postbufq_cnt);
11260 return NULL;
11261 }
11262
11263 /**
11264 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11265 * @phba: Pointer to HBA context object.
11266 * @cmdiocb: Pointer to driver command iocb object.
11267 * @rspiocb: Pointer to driver response iocb object.
11268 *
11269 * This function is the completion handler for the abort iocbs for
11270 * ELS commands. This function is called from the ELS ring event
11271 * handler with no lock held. This function frees memory resources
11272 * associated with the abort iocb.
11273 **/
11274 static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocb,struct lpfc_iocbq * rspiocb)11275 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11276 struct lpfc_iocbq *rspiocb)
11277 {
11278 IOCB_t *irsp = &rspiocb->iocb;
11279 uint16_t abort_iotag, abort_context;
11280 struct lpfc_iocbq *abort_iocb = NULL;
11281
11282 if (irsp->ulpStatus) {
11283
11284 /*
11285 * Assume that the port already completed and returned, or
11286 * will return the iocb. Just Log the message.
11287 */
11288 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11289 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11290
11291 spin_lock_irq(&phba->hbalock);
11292 if (phba->sli_rev < LPFC_SLI_REV4) {
11293 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11294 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11295 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11296 spin_unlock_irq(&phba->hbalock);
11297 goto release_iocb;
11298 }
11299 if (abort_iotag != 0 &&
11300 abort_iotag <= phba->sli.last_iotag)
11301 abort_iocb =
11302 phba->sli.iocbq_lookup[abort_iotag];
11303 } else
11304 /* For sli4 the abort_tag is the XRI,
11305 * so the abort routine puts the iotag of the iocb
11306 * being aborted in the context field of the abort
11307 * IOCB.
11308 */
11309 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11310
11311 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11312 "0327 Cannot abort els iocb x%px "
11313 "with tag %x context %x, abort status %x, "
11314 "abort code %x\n",
11315 abort_iocb, abort_iotag, abort_context,
11316 irsp->ulpStatus, irsp->un.ulpWord[4]);
11317
11318 spin_unlock_irq(&phba->hbalock);
11319 }
11320 release_iocb:
11321 lpfc_sli_release_iocbq(phba, cmdiocb);
11322 return;
11323 }
11324
11325 /**
11326 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
11327 * @phba: Pointer to HBA context object.
11328 * @cmdiocb: Pointer to driver command iocb object.
11329 * @rspiocb: Pointer to driver response iocb object.
11330 *
11331 * The function is called from SLI ring event handler with no
11332 * lock held. This function is the completion handler for ELS commands
11333 * which are aborted. The function frees memory resources used for
11334 * the aborted ELS commands.
11335 **/
11336 static void
lpfc_ignore_els_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocb,struct lpfc_iocbq * rspiocb)11337 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11338 struct lpfc_iocbq *rspiocb)
11339 {
11340 IOCB_t *irsp = &rspiocb->iocb;
11341
11342 /* ELS cmd tag <ulpIoTag> completes */
11343 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11344 "0139 Ignoring ELS cmd tag x%x completion Data: "
11345 "x%x x%x x%x\n",
11346 irsp->ulpIoTag, irsp->ulpStatus,
11347 irsp->un.ulpWord[4], irsp->ulpTimeout);
11348 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11349 lpfc_ct_free_iocb(phba, cmdiocb);
11350 else
11351 lpfc_els_free_iocb(phba, cmdiocb);
11352 return;
11353 }
11354
11355 /**
11356 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11357 * @phba: Pointer to HBA context object.
11358 * @pring: Pointer to driver SLI ring object.
11359 * @cmdiocb: Pointer to driver command iocb object.
11360 *
11361 * This function issues an abort iocb for the provided command iocb down to
11362 * the port. Other than the case the outstanding command iocb is an abort
11363 * request, this function issues abort out unconditionally. This function is
11364 * called with hbalock held. The function returns 0 when it fails due to
11365 * memory allocation failure or when the command iocb is an abort request.
11366 * The hbalock is asserted held in the code path calling this routine.
11367 **/
11368 static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * cmdiocb)11369 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11370 struct lpfc_iocbq *cmdiocb)
11371 {
11372 struct lpfc_vport *vport = cmdiocb->vport;
11373 struct lpfc_iocbq *abtsiocbp;
11374 IOCB_t *icmd = NULL;
11375 IOCB_t *iabt = NULL;
11376 int retval;
11377 unsigned long iflags;
11378 struct lpfc_nodelist *ndlp;
11379
11380 /*
11381 * There are certain command types we don't want to abort. And we
11382 * don't want to abort commands that are already in the process of
11383 * being aborted.
11384 */
11385 icmd = &cmdiocb->iocb;
11386 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11387 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11388 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11389 return 0;
11390
11391 /* issue ABTS for this IOCB based on iotag */
11392 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11393 if (abtsiocbp == NULL)
11394 return 0;
11395
11396 /* This signals the response to set the correct status
11397 * before calling the completion handler
11398 */
11399 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11400
11401 iabt = &abtsiocbp->iocb;
11402 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11403 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11404 if (phba->sli_rev == LPFC_SLI_REV4) {
11405 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11406 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11407 } else {
11408 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11409 if (pring->ringno == LPFC_ELS_RING) {
11410 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11411 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11412 }
11413 }
11414 iabt->ulpLe = 1;
11415 iabt->ulpClass = icmd->ulpClass;
11416
11417 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11418 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11419 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11420 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11421 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11422 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11423
11424 if (phba->link_state >= LPFC_LINK_UP)
11425 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11426 else
11427 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11428
11429 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11430 abtsiocbp->vport = vport;
11431
11432 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11433 "0339 Abort xri x%x, original iotag x%x, "
11434 "abort cmd iotag x%x\n",
11435 iabt->un.acxri.abortIoTag,
11436 iabt->un.acxri.abortContextTag,
11437 abtsiocbp->iotag);
11438
11439 if (phba->sli_rev == LPFC_SLI_REV4) {
11440 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11441 if (unlikely(pring == NULL))
11442 return 0;
11443 /* Note: both hbalock and ring_lock need to be set here */
11444 spin_lock_irqsave(&pring->ring_lock, iflags);
11445 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11446 abtsiocbp, 0);
11447 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11448 } else {
11449 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11450 abtsiocbp, 0);
11451 }
11452
11453 if (retval)
11454 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11455
11456 /*
11457 * Caller to this routine should check for IOCB_ERROR
11458 * and handle it properly. This routine no longer removes
11459 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11460 */
11461 return retval;
11462 }
11463
11464 /**
11465 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11466 * @phba: Pointer to HBA context object.
11467 * @pring: Pointer to driver SLI ring object.
11468 * @cmdiocb: Pointer to driver command iocb object.
11469 *
11470 * This function issues an abort iocb for the provided command iocb. In case
11471 * of unloading, the abort iocb will not be issued to commands on the ELS
11472 * ring. Instead, the callback function shall be changed to those commands
11473 * so that nothing happens when them finishes. This function is called with
11474 * hbalock held. The function returns 0 when the command iocb is an abort
11475 * request.
11476 **/
11477 int
lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * cmdiocb)11478 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11479 struct lpfc_iocbq *cmdiocb)
11480 {
11481 struct lpfc_vport *vport = cmdiocb->vport;
11482 int retval = IOCB_ERROR;
11483 IOCB_t *icmd = NULL;
11484
11485 lockdep_assert_held(&phba->hbalock);
11486
11487 /*
11488 * There are certain command types we don't want to abort. And we
11489 * don't want to abort commands that are already in the process of
11490 * being aborted.
11491 */
11492 icmd = &cmdiocb->iocb;
11493 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11494 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11495 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11496 return 0;
11497
11498 if (!pring) {
11499 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11500 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11501 else
11502 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11503 goto abort_iotag_exit;
11504 }
11505
11506 /*
11507 * If we're unloading, don't abort iocb on the ELS ring, but change
11508 * the callback so that nothing happens when it finishes.
11509 */
11510 if ((vport->load_flag & FC_UNLOADING) &&
11511 (pring->ringno == LPFC_ELS_RING)) {
11512 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11513 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11514 else
11515 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11516 goto abort_iotag_exit;
11517 }
11518
11519 /* Now, we try to issue the abort to the cmdiocb out */
11520 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11521
11522 abort_iotag_exit:
11523 /*
11524 * Caller to this routine should check for IOCB_ERROR
11525 * and handle it properly. This routine no longer removes
11526 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11527 */
11528 return retval;
11529 }
11530
11531 /**
11532 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
11533 * @phba: pointer to lpfc HBA data structure.
11534 *
11535 * This routine will abort all pending and outstanding iocbs to an HBA.
11536 **/
11537 void
lpfc_sli_hba_iocb_abort(struct lpfc_hba * phba)11538 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11539 {
11540 struct lpfc_sli *psli = &phba->sli;
11541 struct lpfc_sli_ring *pring;
11542 struct lpfc_queue *qp = NULL;
11543 int i;
11544
11545 if (phba->sli_rev != LPFC_SLI_REV4) {
11546 for (i = 0; i < psli->num_rings; i++) {
11547 pring = &psli->sli3_ring[i];
11548 lpfc_sli_abort_iocb_ring(phba, pring);
11549 }
11550 return;
11551 }
11552 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11553 pring = qp->pring;
11554 if (!pring)
11555 continue;
11556 lpfc_sli_abort_iocb_ring(phba, pring);
11557 }
11558 }
11559
11560 /**
11561 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
11562 * @iocbq: Pointer to driver iocb object.
11563 * @vport: Pointer to driver virtual port object.
11564 * @tgt_id: SCSI ID of the target.
11565 * @lun_id: LUN ID of the scsi device.
11566 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
11567 *
11568 * This function acts as an iocb filter for functions which abort or count
11569 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
11570 * 0 if the filtering criteria is met for the given iocb and will return
11571 * 1 if the filtering criteria is not met.
11572 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
11573 * given iocb is for the SCSI device specified by vport, tgt_id and
11574 * lun_id parameter.
11575 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
11576 * given iocb is for the SCSI target specified by vport and tgt_id
11577 * parameters.
11578 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
11579 * given iocb is for the SCSI host associated with the given vport.
11580 * This function is called with no locks held.
11581 **/
11582 static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq * iocbq,struct lpfc_vport * vport,uint16_t tgt_id,uint64_t lun_id,lpfc_ctx_cmd ctx_cmd)11583 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11584 uint16_t tgt_id, uint64_t lun_id,
11585 lpfc_ctx_cmd ctx_cmd)
11586 {
11587 struct lpfc_io_buf *lpfc_cmd;
11588 IOCB_t *icmd = NULL;
11589 int rc = 1;
11590
11591 if (iocbq->vport != vport)
11592 return rc;
11593
11594 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11595 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11596 iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11597 return rc;
11598
11599 icmd = &iocbq->iocb;
11600 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11601 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11602 return rc;
11603
11604 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11605
11606 if (lpfc_cmd->pCmd == NULL)
11607 return rc;
11608
11609 switch (ctx_cmd) {
11610 case LPFC_CTX_LUN:
11611 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11612 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11613 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11614 rc = 0;
11615 break;
11616 case LPFC_CTX_TGT:
11617 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11618 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11619 rc = 0;
11620 break;
11621 case LPFC_CTX_HOST:
11622 rc = 0;
11623 break;
11624 default:
11625 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11626 __func__, ctx_cmd);
11627 break;
11628 }
11629
11630 return rc;
11631 }
11632
11633 /**
11634 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
11635 * @vport: Pointer to virtual port.
11636 * @tgt_id: SCSI ID of the target.
11637 * @lun_id: LUN ID of the scsi device.
11638 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11639 *
11640 * This function returns number of FCP commands pending for the vport.
11641 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
11642 * commands pending on the vport associated with SCSI device specified
11643 * by tgt_id and lun_id parameters.
11644 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
11645 * commands pending on the vport associated with SCSI target specified
11646 * by tgt_id parameter.
11647 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
11648 * commands pending on the vport.
11649 * This function returns the number of iocbs which satisfy the filter.
11650 * This function is called without any lock held.
11651 **/
11652 int
lpfc_sli_sum_iocb(struct lpfc_vport * vport,uint16_t tgt_id,uint64_t lun_id,lpfc_ctx_cmd ctx_cmd)11653 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11654 lpfc_ctx_cmd ctx_cmd)
11655 {
11656 struct lpfc_hba *phba = vport->phba;
11657 struct lpfc_iocbq *iocbq;
11658 int sum, i;
11659
11660 spin_lock_irq(&phba->hbalock);
11661 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11662 iocbq = phba->sli.iocbq_lookup[i];
11663
11664 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11665 ctx_cmd) == 0)
11666 sum++;
11667 }
11668 spin_unlock_irq(&phba->hbalock);
11669
11670 return sum;
11671 }
11672
11673 /**
11674 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
11675 * @phba: Pointer to HBA context object
11676 * @cmdiocb: Pointer to command iocb object.
11677 * @rspiocb: Pointer to response iocb object.
11678 *
11679 * This function is called when an aborted FCP iocb completes. This
11680 * function is called by the ring event handler with no lock held.
11681 * This function frees the iocb.
11682 **/
11683 void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocb,struct lpfc_iocbq * rspiocb)11684 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11685 struct lpfc_iocbq *rspiocb)
11686 {
11687 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11688 "3096 ABORT_XRI_CN completing on rpi x%x "
11689 "original iotag x%x, abort cmd iotag x%x "
11690 "status 0x%x, reason 0x%x\n",
11691 cmdiocb->iocb.un.acxri.abortContextTag,
11692 cmdiocb->iocb.un.acxri.abortIoTag,
11693 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11694 rspiocb->iocb.un.ulpWord[4]);
11695 lpfc_sli_release_iocbq(phba, cmdiocb);
11696 return;
11697 }
11698
11699 /**
11700 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
11701 * @vport: Pointer to virtual port.
11702 * @pring: Pointer to driver SLI ring object.
11703 * @tgt_id: SCSI ID of the target.
11704 * @lun_id: LUN ID of the scsi device.
11705 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11706 *
11707 * This function sends an abort command for every SCSI command
11708 * associated with the given virtual port pending on the ring
11709 * filtered by lpfc_sli_validate_fcp_iocb function.
11710 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
11711 * FCP iocbs associated with lun specified by tgt_id and lun_id
11712 * parameters
11713 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
11714 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11715 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
11716 * FCP iocbs associated with virtual port.
11717 * This function returns number of iocbs it failed to abort.
11718 * This function is called with no locks held.
11719 **/
11720 int
lpfc_sli_abort_iocb(struct lpfc_vport * vport,struct lpfc_sli_ring * pring,uint16_t tgt_id,uint64_t lun_id,lpfc_ctx_cmd abort_cmd)11721 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11722 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11723 {
11724 struct lpfc_hba *phba = vport->phba;
11725 struct lpfc_iocbq *iocbq;
11726 struct lpfc_iocbq *abtsiocb;
11727 struct lpfc_sli_ring *pring_s4;
11728 IOCB_t *cmd = NULL;
11729 int errcnt = 0, ret_val = 0;
11730 int i;
11731
11732 /* all I/Os are in process of being flushed */
11733 if (phba->hba_flag & HBA_IOQ_FLUSH)
11734 return errcnt;
11735
11736 for (i = 1; i <= phba->sli.last_iotag; i++) {
11737 iocbq = phba->sli.iocbq_lookup[i];
11738
11739 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11740 abort_cmd) != 0)
11741 continue;
11742
11743 /*
11744 * If the iocbq is already being aborted, don't take a second
11745 * action, but do count it.
11746 */
11747 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11748 continue;
11749
11750 /* issue ABTS for this IOCB based on iotag */
11751 abtsiocb = lpfc_sli_get_iocbq(phba);
11752 if (abtsiocb == NULL) {
11753 errcnt++;
11754 continue;
11755 }
11756
11757 /* indicate the IO is being aborted by the driver. */
11758 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11759
11760 cmd = &iocbq->iocb;
11761 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11762 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11763 if (phba->sli_rev == LPFC_SLI_REV4)
11764 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11765 else
11766 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11767 abtsiocb->iocb.ulpLe = 1;
11768 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11769 abtsiocb->vport = vport;
11770
11771 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11772 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11773 if (iocbq->iocb_flag & LPFC_IO_FCP)
11774 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11775 if (iocbq->iocb_flag & LPFC_IO_FOF)
11776 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11777
11778 if (lpfc_is_link_up(phba))
11779 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11780 else
11781 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11782
11783 /* Setup callback routine and issue the command. */
11784 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11785 if (phba->sli_rev == LPFC_SLI_REV4) {
11786 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11787 if (!pring_s4)
11788 continue;
11789 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11790 abtsiocb, 0);
11791 } else
11792 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11793 abtsiocb, 0);
11794 if (ret_val == IOCB_ERROR) {
11795 lpfc_sli_release_iocbq(phba, abtsiocb);
11796 errcnt++;
11797 continue;
11798 }
11799 }
11800
11801 return errcnt;
11802 }
11803
11804 /**
11805 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11806 * @vport: Pointer to virtual port.
11807 * @pring: Pointer to driver SLI ring object.
11808 * @tgt_id: SCSI ID of the target.
11809 * @lun_id: LUN ID of the scsi device.
11810 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11811 *
11812 * This function sends an abort command for every SCSI command
11813 * associated with the given virtual port pending on the ring
11814 * filtered by lpfc_sli_validate_fcp_iocb function.
11815 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11816 * FCP iocbs associated with lun specified by tgt_id and lun_id
11817 * parameters
11818 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11819 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11820 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11821 * FCP iocbs associated with virtual port.
11822 * This function returns number of iocbs it aborted .
11823 * This function is called with no locks held right after a taskmgmt
11824 * command is sent.
11825 **/
11826 int
lpfc_sli_abort_taskmgmt(struct lpfc_vport * vport,struct lpfc_sli_ring * pring,uint16_t tgt_id,uint64_t lun_id,lpfc_ctx_cmd cmd)11827 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11828 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11829 {
11830 struct lpfc_hba *phba = vport->phba;
11831 struct lpfc_io_buf *lpfc_cmd;
11832 struct lpfc_iocbq *abtsiocbq;
11833 struct lpfc_nodelist *ndlp;
11834 struct lpfc_iocbq *iocbq;
11835 IOCB_t *icmd;
11836 int sum, i, ret_val;
11837 unsigned long iflags;
11838 struct lpfc_sli_ring *pring_s4 = NULL;
11839
11840 spin_lock_irqsave(&phba->hbalock, iflags);
11841
11842 /* all I/Os are in process of being flushed */
11843 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11844 spin_unlock_irqrestore(&phba->hbalock, iflags);
11845 return 0;
11846 }
11847 sum = 0;
11848
11849 for (i = 1; i <= phba->sli.last_iotag; i++) {
11850 iocbq = phba->sli.iocbq_lookup[i];
11851
11852 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11853 cmd) != 0)
11854 continue;
11855
11856 /* Guard against IO completion being called at same time */
11857 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11858 spin_lock(&lpfc_cmd->buf_lock);
11859
11860 if (!lpfc_cmd->pCmd) {
11861 spin_unlock(&lpfc_cmd->buf_lock);
11862 continue;
11863 }
11864
11865 if (phba->sli_rev == LPFC_SLI_REV4) {
11866 pring_s4 =
11867 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11868 if (!pring_s4) {
11869 spin_unlock(&lpfc_cmd->buf_lock);
11870 continue;
11871 }
11872 /* Note: both hbalock and ring_lock must be set here */
11873 spin_lock(&pring_s4->ring_lock);
11874 }
11875
11876 /*
11877 * If the iocbq is already being aborted, don't take a second
11878 * action, but do count it.
11879 */
11880 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11881 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11882 if (phba->sli_rev == LPFC_SLI_REV4)
11883 spin_unlock(&pring_s4->ring_lock);
11884 spin_unlock(&lpfc_cmd->buf_lock);
11885 continue;
11886 }
11887
11888 /* issue ABTS for this IOCB based on iotag */
11889 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11890 if (!abtsiocbq) {
11891 if (phba->sli_rev == LPFC_SLI_REV4)
11892 spin_unlock(&pring_s4->ring_lock);
11893 spin_unlock(&lpfc_cmd->buf_lock);
11894 continue;
11895 }
11896
11897 icmd = &iocbq->iocb;
11898 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11899 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11900 if (phba->sli_rev == LPFC_SLI_REV4)
11901 abtsiocbq->iocb.un.acxri.abortIoTag =
11902 iocbq->sli4_xritag;
11903 else
11904 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11905 abtsiocbq->iocb.ulpLe = 1;
11906 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11907 abtsiocbq->vport = vport;
11908
11909 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11910 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11911 if (iocbq->iocb_flag & LPFC_IO_FCP)
11912 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11913 if (iocbq->iocb_flag & LPFC_IO_FOF)
11914 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11915
11916 ndlp = lpfc_cmd->rdata->pnode;
11917
11918 if (lpfc_is_link_up(phba) &&
11919 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11920 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11921 else
11922 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11923
11924 /* Setup callback routine and issue the command. */
11925 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11926
11927 /*
11928 * Indicate the IO is being aborted by the driver and set
11929 * the caller's flag into the aborted IO.
11930 */
11931 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11932
11933 if (phba->sli_rev == LPFC_SLI_REV4) {
11934 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11935 abtsiocbq, 0);
11936 spin_unlock(&pring_s4->ring_lock);
11937 } else {
11938 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11939 abtsiocbq, 0);
11940 }
11941
11942 spin_unlock(&lpfc_cmd->buf_lock);
11943
11944 if (ret_val == IOCB_ERROR)
11945 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11946 else
11947 sum++;
11948 }
11949 spin_unlock_irqrestore(&phba->hbalock, iflags);
11950 return sum;
11951 }
11952
11953 /**
11954 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11955 * @phba: Pointer to HBA context object.
11956 * @cmdiocbq: Pointer to command iocb.
11957 * @rspiocbq: Pointer to response iocb.
11958 *
11959 * This function is the completion handler for iocbs issued using
11960 * lpfc_sli_issue_iocb_wait function. This function is called by the
11961 * ring event handler function without any lock held. This function
11962 * can be called from both worker thread context and interrupt
11963 * context. This function also can be called from other thread which
11964 * cleans up the SLI layer objects.
11965 * This function copy the contents of the response iocb to the
11966 * response iocb memory object provided by the caller of
11967 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11968 * sleeps for the iocb completion.
11969 **/
11970 static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)11971 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11972 struct lpfc_iocbq *cmdiocbq,
11973 struct lpfc_iocbq *rspiocbq)
11974 {
11975 wait_queue_head_t *pdone_q;
11976 unsigned long iflags;
11977 struct lpfc_io_buf *lpfc_cmd;
11978
11979 spin_lock_irqsave(&phba->hbalock, iflags);
11980 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11981
11982 /*
11983 * A time out has occurred for the iocb. If a time out
11984 * completion handler has been supplied, call it. Otherwise,
11985 * just free the iocbq.
11986 */
11987
11988 spin_unlock_irqrestore(&phba->hbalock, iflags);
11989 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11990 cmdiocbq->wait_iocb_cmpl = NULL;
11991 if (cmdiocbq->iocb_cmpl)
11992 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11993 else
11994 lpfc_sli_release_iocbq(phba, cmdiocbq);
11995 return;
11996 }
11997
11998 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11999 if (cmdiocbq->context2 && rspiocbq)
12000 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12001 &rspiocbq->iocb, sizeof(IOCB_t));
12002
12003 /* Set the exchange busy flag for task management commands */
12004 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12005 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12006 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12007 cur_iocbq);
12008 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12009 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12010 else
12011 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12012 }
12013
12014 pdone_q = cmdiocbq->context_un.wait_queue;
12015 if (pdone_q)
12016 wake_up(pdone_q);
12017 spin_unlock_irqrestore(&phba->hbalock, iflags);
12018 return;
12019 }
12020
12021 /**
12022 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
12023 * @phba: Pointer to HBA context object..
12024 * @piocbq: Pointer to command iocb.
12025 * @flag: Flag to test.
12026 *
12027 * This routine grabs the hbalock and then test the iocb_flag to
12028 * see if the passed in flag is set.
12029 * Returns:
12030 * 1 if flag is set.
12031 * 0 if flag is not set.
12032 **/
12033 static int
lpfc_chk_iocb_flg(struct lpfc_hba * phba,struct lpfc_iocbq * piocbq,uint32_t flag)12034 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12035 struct lpfc_iocbq *piocbq, uint32_t flag)
12036 {
12037 unsigned long iflags;
12038 int ret;
12039
12040 spin_lock_irqsave(&phba->hbalock, iflags);
12041 ret = piocbq->iocb_flag & flag;
12042 spin_unlock_irqrestore(&phba->hbalock, iflags);
12043 return ret;
12044
12045 }
12046
12047 /**
12048 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
12049 * @phba: Pointer to HBA context object..
12050 * @ring_number: Ring number
12051 * @piocb: Pointer to command iocb.
12052 * @prspiocbq: Pointer to response iocb.
12053 * @timeout: Timeout in number of seconds.
12054 *
12055 * This function issues the iocb to firmware and waits for the
12056 * iocb to complete. The iocb_cmpl field of the shall be used
12057 * to handle iocbs which time out. If the field is NULL, the
12058 * function shall free the iocbq structure. If more clean up is
12059 * needed, the caller is expected to provide a completion function
12060 * that will provide the needed clean up. If the iocb command is
12061 * not completed within timeout seconds, the function will either
12062 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
12063 * completion function set in the iocb_cmpl field and then return
12064 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
12065 * resources if this function returns IOCB_TIMEDOUT.
12066 * The function waits for the iocb completion using an
12067 * non-interruptible wait.
12068 * This function will sleep while waiting for iocb completion.
12069 * So, this function should not be called from any context which
12070 * does not allow sleeping. Due to the same reason, this function
12071 * cannot be called with interrupt disabled.
12072 * This function assumes that the iocb completions occur while
12073 * this function sleep. So, this function cannot be called from
12074 * the thread which process iocb completion for this ring.
12075 * This function clears the iocb_flag of the iocb object before
12076 * issuing the iocb and the iocb completion handler sets this
12077 * flag and wakes this thread when the iocb completes.
12078 * The contents of the response iocb will be copied to prspiocbq
12079 * by the completion handler when the command completes.
12080 * This function returns IOCB_SUCCESS when success.
12081 * This function is called with no lock held.
12082 **/
12083 int
lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,uint32_t ring_number,struct lpfc_iocbq * piocb,struct lpfc_iocbq * prspiocbq,uint32_t timeout)12084 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12085 uint32_t ring_number,
12086 struct lpfc_iocbq *piocb,
12087 struct lpfc_iocbq *prspiocbq,
12088 uint32_t timeout)
12089 {
12090 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12091 long timeleft, timeout_req = 0;
12092 int retval = IOCB_SUCCESS;
12093 uint32_t creg_val;
12094 struct lpfc_iocbq *iocb;
12095 int txq_cnt = 0;
12096 int txcmplq_cnt = 0;
12097 struct lpfc_sli_ring *pring;
12098 unsigned long iflags;
12099 bool iocb_completed = true;
12100
12101 if (phba->sli_rev >= LPFC_SLI_REV4)
12102 pring = lpfc_sli4_calc_ring(phba, piocb);
12103 else
12104 pring = &phba->sli.sli3_ring[ring_number];
12105 /*
12106 * If the caller has provided a response iocbq buffer, then context2
12107 * is NULL or its an error.
12108 */
12109 if (prspiocbq) {
12110 if (piocb->context2)
12111 return IOCB_ERROR;
12112 piocb->context2 = prspiocbq;
12113 }
12114
12115 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12116 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12117 piocb->context_un.wait_queue = &done_q;
12118 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12119
12120 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12121 if (lpfc_readl(phba->HCregaddr, &creg_val))
12122 return IOCB_ERROR;
12123 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12124 writel(creg_val, phba->HCregaddr);
12125 readl(phba->HCregaddr); /* flush */
12126 }
12127
12128 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12129 SLI_IOCB_RET_IOCB);
12130 if (retval == IOCB_SUCCESS) {
12131 timeout_req = msecs_to_jiffies(timeout * 1000);
12132 timeleft = wait_event_timeout(done_q,
12133 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12134 timeout_req);
12135 spin_lock_irqsave(&phba->hbalock, iflags);
12136 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12137
12138 /*
12139 * IOCB timed out. Inform the wake iocb wait
12140 * completion function and set local status
12141 */
12142
12143 iocb_completed = false;
12144 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12145 }
12146 spin_unlock_irqrestore(&phba->hbalock, iflags);
12147 if (iocb_completed) {
12148 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12149 "0331 IOCB wake signaled\n");
12150 /* Note: we are not indicating if the IOCB has a success
12151 * status or not - that's for the caller to check.
12152 * IOCB_SUCCESS means just that the command was sent and
12153 * completed. Not that it completed successfully.
12154 * */
12155 } else if (timeleft == 0) {
12156 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12157 "0338 IOCB wait timeout error - no "
12158 "wake response Data x%x\n", timeout);
12159 retval = IOCB_TIMEDOUT;
12160 } else {
12161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12162 "0330 IOCB wake NOT set, "
12163 "Data x%x x%lx\n",
12164 timeout, (timeleft / jiffies));
12165 retval = IOCB_TIMEDOUT;
12166 }
12167 } else if (retval == IOCB_BUSY) {
12168 if (phba->cfg_log_verbose & LOG_SLI) {
12169 list_for_each_entry(iocb, &pring->txq, list) {
12170 txq_cnt++;
12171 }
12172 list_for_each_entry(iocb, &pring->txcmplq, list) {
12173 txcmplq_cnt++;
12174 }
12175 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12176 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12177 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12178 }
12179 return retval;
12180 } else {
12181 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12182 "0332 IOCB wait issue failed, Data x%x\n",
12183 retval);
12184 retval = IOCB_ERROR;
12185 }
12186
12187 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12188 if (lpfc_readl(phba->HCregaddr, &creg_val))
12189 return IOCB_ERROR;
12190 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12191 writel(creg_val, phba->HCregaddr);
12192 readl(phba->HCregaddr); /* flush */
12193 }
12194
12195 if (prspiocbq)
12196 piocb->context2 = NULL;
12197
12198 piocb->context_un.wait_queue = NULL;
12199 piocb->iocb_cmpl = NULL;
12200 return retval;
12201 }
12202
12203 /**
12204 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
12205 * @phba: Pointer to HBA context object.
12206 * @pmboxq: Pointer to driver mailbox object.
12207 * @timeout: Timeout in number of seconds.
12208 *
12209 * This function issues the mailbox to firmware and waits for the
12210 * mailbox command to complete. If the mailbox command is not
12211 * completed within timeout seconds, it returns MBX_TIMEOUT.
12212 * The function waits for the mailbox completion using an
12213 * interruptible wait. If the thread is woken up due to a
12214 * signal, MBX_TIMEOUT error is returned to the caller. Caller
12215 * should not free the mailbox resources, if this function returns
12216 * MBX_TIMEOUT.
12217 * This function will sleep while waiting for mailbox completion.
12218 * So, this function should not be called from any context which
12219 * does not allow sleeping. Due to the same reason, this function
12220 * cannot be called with interrupt disabled.
12221 * This function assumes that the mailbox completion occurs while
12222 * this function sleep. So, this function cannot be called from
12223 * the worker thread which processes mailbox completion.
12224 * This function is called in the context of HBA management
12225 * applications.
12226 * This function returns MBX_SUCCESS when successful.
12227 * This function is called with no lock held.
12228 **/
12229 int
lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq,uint32_t timeout)12230 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12231 uint32_t timeout)
12232 {
12233 struct completion mbox_done;
12234 int retval;
12235 unsigned long flag;
12236
12237 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12238 /* setup wake call as IOCB callback */
12239 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12240
12241 /* setup context3 field to pass wait_queue pointer to wake function */
12242 init_completion(&mbox_done);
12243 pmboxq->context3 = &mbox_done;
12244 /* now issue the command */
12245 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12246 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12247 wait_for_completion_timeout(&mbox_done,
12248 msecs_to_jiffies(timeout * 1000));
12249
12250 spin_lock_irqsave(&phba->hbalock, flag);
12251 pmboxq->context3 = NULL;
12252 /*
12253 * if LPFC_MBX_WAKE flag is set the mailbox is completed
12254 * else do not free the resources.
12255 */
12256 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12257 retval = MBX_SUCCESS;
12258 } else {
12259 retval = MBX_TIMEOUT;
12260 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12261 }
12262 spin_unlock_irqrestore(&phba->hbalock, flag);
12263 }
12264 return retval;
12265 }
12266
12267 /**
12268 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12269 * @phba: Pointer to HBA context.
12270 * @mbx_action: Mailbox shutdown options.
12271 *
12272 * This function is called to shutdown the driver's mailbox sub-system.
12273 * It first marks the mailbox sub-system is in a block state to prevent
12274 * the asynchronous mailbox command from issued off the pending mailbox
12275 * command queue. If the mailbox command sub-system shutdown is due to
12276 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12277 * the mailbox sub-system flush routine to forcefully bring down the
12278 * mailbox sub-system. Otherwise, if it is due to normal condition (such
12279 * as with offline or HBA function reset), this routine will wait for the
12280 * outstanding mailbox command to complete before invoking the mailbox
12281 * sub-system flush routine to gracefully bring down mailbox sub-system.
12282 **/
12283 void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba * phba,int mbx_action)12284 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12285 {
12286 struct lpfc_sli *psli = &phba->sli;
12287 unsigned long timeout;
12288
12289 if (mbx_action == LPFC_MBX_NO_WAIT) {
12290 /* delay 100ms for port state */
12291 msleep(100);
12292 lpfc_sli_mbox_sys_flush(phba);
12293 return;
12294 }
12295 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12296
12297 /* Disable softirqs, including timers from obtaining phba->hbalock */
12298 local_bh_disable();
12299
12300 spin_lock_irq(&phba->hbalock);
12301 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12302
12303 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12304 /* Determine how long we might wait for the active mailbox
12305 * command to be gracefully completed by firmware.
12306 */
12307 if (phba->sli.mbox_active)
12308 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12309 phba->sli.mbox_active) *
12310 1000) + jiffies;
12311 spin_unlock_irq(&phba->hbalock);
12312
12313 /* Enable softirqs again, done with phba->hbalock */
12314 local_bh_enable();
12315
12316 while (phba->sli.mbox_active) {
12317 /* Check active mailbox complete status every 2ms */
12318 msleep(2);
12319 if (time_after(jiffies, timeout))
12320 /* Timeout, let the mailbox flush routine to
12321 * forcefully release active mailbox command
12322 */
12323 break;
12324 }
12325 } else {
12326 spin_unlock_irq(&phba->hbalock);
12327
12328 /* Enable softirqs again, done with phba->hbalock */
12329 local_bh_enable();
12330 }
12331
12332 lpfc_sli_mbox_sys_flush(phba);
12333 }
12334
12335 /**
12336 * lpfc_sli_eratt_read - read sli-3 error attention events
12337 * @phba: Pointer to HBA context.
12338 *
12339 * This function is called to read the SLI3 device error attention registers
12340 * for possible error attention events. The caller must hold the hostlock
12341 * with spin_lock_irq().
12342 *
12343 * This function returns 1 when there is Error Attention in the Host Attention
12344 * Register and returns 0 otherwise.
12345 **/
12346 static int
lpfc_sli_eratt_read(struct lpfc_hba * phba)12347 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12348 {
12349 uint32_t ha_copy;
12350
12351 /* Read chip Host Attention (HA) register */
12352 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12353 goto unplug_err;
12354
12355 if (ha_copy & HA_ERATT) {
12356 /* Read host status register to retrieve error event */
12357 if (lpfc_sli_read_hs(phba))
12358 goto unplug_err;
12359
12360 /* Check if there is a deferred error condition is active */
12361 if ((HS_FFER1 & phba->work_hs) &&
12362 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12363 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12364 phba->hba_flag |= DEFER_ERATT;
12365 /* Clear all interrupt enable conditions */
12366 writel(0, phba->HCregaddr);
12367 readl(phba->HCregaddr);
12368 }
12369
12370 /* Set the driver HA work bitmap */
12371 phba->work_ha |= HA_ERATT;
12372 /* Indicate polling handles this ERATT */
12373 phba->hba_flag |= HBA_ERATT_HANDLED;
12374 return 1;
12375 }
12376 return 0;
12377
12378 unplug_err:
12379 /* Set the driver HS work bitmap */
12380 phba->work_hs |= UNPLUG_ERR;
12381 /* Set the driver HA work bitmap */
12382 phba->work_ha |= HA_ERATT;
12383 /* Indicate polling handles this ERATT */
12384 phba->hba_flag |= HBA_ERATT_HANDLED;
12385 return 1;
12386 }
12387
12388 /**
12389 * lpfc_sli4_eratt_read - read sli-4 error attention events
12390 * @phba: Pointer to HBA context.
12391 *
12392 * This function is called to read the SLI4 device error attention registers
12393 * for possible error attention events. The caller must hold the hostlock
12394 * with spin_lock_irq().
12395 *
12396 * This function returns 1 when there is Error Attention in the Host Attention
12397 * Register and returns 0 otherwise.
12398 **/
12399 static int
lpfc_sli4_eratt_read(struct lpfc_hba * phba)12400 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12401 {
12402 uint32_t uerr_sta_hi, uerr_sta_lo;
12403 uint32_t if_type, portsmphr;
12404 struct lpfc_register portstat_reg;
12405
12406 /*
12407 * For now, use the SLI4 device internal unrecoverable error
12408 * registers for error attention. This can be changed later.
12409 */
12410 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12411 switch (if_type) {
12412 case LPFC_SLI_INTF_IF_TYPE_0:
12413 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12414 &uerr_sta_lo) ||
12415 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12416 &uerr_sta_hi)) {
12417 phba->work_hs |= UNPLUG_ERR;
12418 phba->work_ha |= HA_ERATT;
12419 phba->hba_flag |= HBA_ERATT_HANDLED;
12420 return 1;
12421 }
12422 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12423 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12425 "1423 HBA Unrecoverable error: "
12426 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12427 "ue_mask_lo_reg=0x%x, "
12428 "ue_mask_hi_reg=0x%x\n",
12429 uerr_sta_lo, uerr_sta_hi,
12430 phba->sli4_hba.ue_mask_lo,
12431 phba->sli4_hba.ue_mask_hi);
12432 phba->work_status[0] = uerr_sta_lo;
12433 phba->work_status[1] = uerr_sta_hi;
12434 phba->work_ha |= HA_ERATT;
12435 phba->hba_flag |= HBA_ERATT_HANDLED;
12436 return 1;
12437 }
12438 break;
12439 case LPFC_SLI_INTF_IF_TYPE_2:
12440 case LPFC_SLI_INTF_IF_TYPE_6:
12441 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12442 &portstat_reg.word0) ||
12443 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12444 &portsmphr)){
12445 phba->work_hs |= UNPLUG_ERR;
12446 phba->work_ha |= HA_ERATT;
12447 phba->hba_flag |= HBA_ERATT_HANDLED;
12448 return 1;
12449 }
12450 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12451 phba->work_status[0] =
12452 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12453 phba->work_status[1] =
12454 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12455 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12456 "2885 Port Status Event: "
12457 "port status reg 0x%x, "
12458 "port smphr reg 0x%x, "
12459 "error 1=0x%x, error 2=0x%x\n",
12460 portstat_reg.word0,
12461 portsmphr,
12462 phba->work_status[0],
12463 phba->work_status[1]);
12464 phba->work_ha |= HA_ERATT;
12465 phba->hba_flag |= HBA_ERATT_HANDLED;
12466 return 1;
12467 }
12468 break;
12469 case LPFC_SLI_INTF_IF_TYPE_1:
12470 default:
12471 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12472 "2886 HBA Error Attention on unsupported "
12473 "if type %d.", if_type);
12474 return 1;
12475 }
12476
12477 return 0;
12478 }
12479
12480 /**
12481 * lpfc_sli_check_eratt - check error attention events
12482 * @phba: Pointer to HBA context.
12483 *
12484 * This function is called from timer soft interrupt context to check HBA's
12485 * error attention register bit for error attention events.
12486 *
12487 * This function returns 1 when there is Error Attention in the Host Attention
12488 * Register and returns 0 otherwise.
12489 **/
12490 int
lpfc_sli_check_eratt(struct lpfc_hba * phba)12491 lpfc_sli_check_eratt(struct lpfc_hba *phba)
12492 {
12493 uint32_t ha_copy;
12494
12495 /* If somebody is waiting to handle an eratt, don't process it
12496 * here. The brdkill function will do this.
12497 */
12498 if (phba->link_flag & LS_IGNORE_ERATT)
12499 return 0;
12500
12501 /* Check if interrupt handler handles this ERATT */
12502 spin_lock_irq(&phba->hbalock);
12503 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12504 /* Interrupt handler has handled ERATT */
12505 spin_unlock_irq(&phba->hbalock);
12506 return 0;
12507 }
12508
12509 /*
12510 * If there is deferred error attention, do not check for error
12511 * attention
12512 */
12513 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12514 spin_unlock_irq(&phba->hbalock);
12515 return 0;
12516 }
12517
12518 /* If PCI channel is offline, don't process it */
12519 if (unlikely(pci_channel_offline(phba->pcidev))) {
12520 spin_unlock_irq(&phba->hbalock);
12521 return 0;
12522 }
12523
12524 switch (phba->sli_rev) {
12525 case LPFC_SLI_REV2:
12526 case LPFC_SLI_REV3:
12527 /* Read chip Host Attention (HA) register */
12528 ha_copy = lpfc_sli_eratt_read(phba);
12529 break;
12530 case LPFC_SLI_REV4:
12531 /* Read device Uncoverable Error (UERR) registers */
12532 ha_copy = lpfc_sli4_eratt_read(phba);
12533 break;
12534 default:
12535 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12536 "0299 Invalid SLI revision (%d)\n",
12537 phba->sli_rev);
12538 ha_copy = 0;
12539 break;
12540 }
12541 spin_unlock_irq(&phba->hbalock);
12542
12543 return ha_copy;
12544 }
12545
12546 /**
12547 * lpfc_intr_state_check - Check device state for interrupt handling
12548 * @phba: Pointer to HBA context.
12549 *
12550 * This inline routine checks whether a device or its PCI slot is in a state
12551 * that the interrupt should be handled.
12552 *
12553 * This function returns 0 if the device or the PCI slot is in a state that
12554 * interrupt should be handled, otherwise -EIO.
12555 */
12556 static inline int
lpfc_intr_state_check(struct lpfc_hba * phba)12557 lpfc_intr_state_check(struct lpfc_hba *phba)
12558 {
12559 /* If the pci channel is offline, ignore all the interrupts */
12560 if (unlikely(pci_channel_offline(phba->pcidev)))
12561 return -EIO;
12562
12563 /* Update device level interrupt statistics */
12564 phba->sli.slistat.sli_intr++;
12565
12566 /* Ignore all interrupts during initialization. */
12567 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12568 return -EIO;
12569
12570 return 0;
12571 }
12572
12573 /**
12574 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12575 * @irq: Interrupt number.
12576 * @dev_id: The device context pointer.
12577 *
12578 * This function is directly called from the PCI layer as an interrupt
12579 * service routine when device with SLI-3 interface spec is enabled with
12580 * MSI-X multi-message interrupt mode and there are slow-path events in
12581 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12582 * interrupt mode, this function is called as part of the device-level
12583 * interrupt handler. When the PCI slot is in error recovery or the HBA
12584 * is undergoing initialization, the interrupt handler will not process
12585 * the interrupt. The link attention and ELS ring attention events are
12586 * handled by the worker thread. The interrupt handler signals the worker
12587 * thread and returns for these events. This function is called without
12588 * any lock held. It gets the hbalock to access and update SLI data
12589 * structures.
12590 *
12591 * This function returns IRQ_HANDLED when interrupt is handled else it
12592 * returns IRQ_NONE.
12593 **/
12594 irqreturn_t
lpfc_sli_sp_intr_handler(int irq,void * dev_id)12595 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12596 {
12597 struct lpfc_hba *phba;
12598 uint32_t ha_copy, hc_copy;
12599 uint32_t work_ha_copy;
12600 unsigned long status;
12601 unsigned long iflag;
12602 uint32_t control;
12603
12604 MAILBOX_t *mbox, *pmbox;
12605 struct lpfc_vport *vport;
12606 struct lpfc_nodelist *ndlp;
12607 struct lpfc_dmabuf *mp;
12608 LPFC_MBOXQ_t *pmb;
12609 int rc;
12610
12611 /*
12612 * Get the driver's phba structure from the dev_id and
12613 * assume the HBA is not interrupting.
12614 */
12615 phba = (struct lpfc_hba *)dev_id;
12616
12617 if (unlikely(!phba))
12618 return IRQ_NONE;
12619
12620 /*
12621 * Stuff needs to be attented to when this function is invoked as an
12622 * individual interrupt handler in MSI-X multi-message interrupt mode
12623 */
12624 if (phba->intr_type == MSIX) {
12625 /* Check device state for handling interrupt */
12626 if (lpfc_intr_state_check(phba))
12627 return IRQ_NONE;
12628 /* Need to read HA REG for slow-path events */
12629 spin_lock_irqsave(&phba->hbalock, iflag);
12630 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12631 goto unplug_error;
12632 /* If somebody is waiting to handle an eratt don't process it
12633 * here. The brdkill function will do this.
12634 */
12635 if (phba->link_flag & LS_IGNORE_ERATT)
12636 ha_copy &= ~HA_ERATT;
12637 /* Check the need for handling ERATT in interrupt handler */
12638 if (ha_copy & HA_ERATT) {
12639 if (phba->hba_flag & HBA_ERATT_HANDLED)
12640 /* ERATT polling has handled ERATT */
12641 ha_copy &= ~HA_ERATT;
12642 else
12643 /* Indicate interrupt handler handles ERATT */
12644 phba->hba_flag |= HBA_ERATT_HANDLED;
12645 }
12646
12647 /*
12648 * If there is deferred error attention, do not check for any
12649 * interrupt.
12650 */
12651 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12652 spin_unlock_irqrestore(&phba->hbalock, iflag);
12653 return IRQ_NONE;
12654 }
12655
12656 /* Clear up only attention source related to slow-path */
12657 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12658 goto unplug_error;
12659
12660 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12661 HC_LAINT_ENA | HC_ERINT_ENA),
12662 phba->HCregaddr);
12663 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12664 phba->HAregaddr);
12665 writel(hc_copy, phba->HCregaddr);
12666 readl(phba->HAregaddr); /* flush */
12667 spin_unlock_irqrestore(&phba->hbalock, iflag);
12668 } else
12669 ha_copy = phba->ha_copy;
12670
12671 work_ha_copy = ha_copy & phba->work_ha_mask;
12672
12673 if (work_ha_copy) {
12674 if (work_ha_copy & HA_LATT) {
12675 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12676 /*
12677 * Turn off Link Attention interrupts
12678 * until CLEAR_LA done
12679 */
12680 spin_lock_irqsave(&phba->hbalock, iflag);
12681 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12682 if (lpfc_readl(phba->HCregaddr, &control))
12683 goto unplug_error;
12684 control &= ~HC_LAINT_ENA;
12685 writel(control, phba->HCregaddr);
12686 readl(phba->HCregaddr); /* flush */
12687 spin_unlock_irqrestore(&phba->hbalock, iflag);
12688 }
12689 else
12690 work_ha_copy &= ~HA_LATT;
12691 }
12692
12693 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12694 /*
12695 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12696 * the only slow ring.
12697 */
12698 status = (work_ha_copy &
12699 (HA_RXMASK << (4*LPFC_ELS_RING)));
12700 status >>= (4*LPFC_ELS_RING);
12701 if (status & HA_RXMASK) {
12702 spin_lock_irqsave(&phba->hbalock, iflag);
12703 if (lpfc_readl(phba->HCregaddr, &control))
12704 goto unplug_error;
12705
12706 lpfc_debugfs_slow_ring_trc(phba,
12707 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12708 control, status,
12709 (uint32_t)phba->sli.slistat.sli_intr);
12710
12711 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12712 lpfc_debugfs_slow_ring_trc(phba,
12713 "ISR Disable ring:"
12714 "pwork:x%x hawork:x%x wait:x%x",
12715 phba->work_ha, work_ha_copy,
12716 (uint32_t)((unsigned long)
12717 &phba->work_waitq));
12718
12719 control &=
12720 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12721 writel(control, phba->HCregaddr);
12722 readl(phba->HCregaddr); /* flush */
12723 }
12724 else {
12725 lpfc_debugfs_slow_ring_trc(phba,
12726 "ISR slow ring: pwork:"
12727 "x%x hawork:x%x wait:x%x",
12728 phba->work_ha, work_ha_copy,
12729 (uint32_t)((unsigned long)
12730 &phba->work_waitq));
12731 }
12732 spin_unlock_irqrestore(&phba->hbalock, iflag);
12733 }
12734 }
12735 spin_lock_irqsave(&phba->hbalock, iflag);
12736 if (work_ha_copy & HA_ERATT) {
12737 if (lpfc_sli_read_hs(phba))
12738 goto unplug_error;
12739 /*
12740 * Check if there is a deferred error condition
12741 * is active
12742 */
12743 if ((HS_FFER1 & phba->work_hs) &&
12744 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12745 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12746 phba->work_hs)) {
12747 phba->hba_flag |= DEFER_ERATT;
12748 /* Clear all interrupt enable conditions */
12749 writel(0, phba->HCregaddr);
12750 readl(phba->HCregaddr);
12751 }
12752 }
12753
12754 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12755 pmb = phba->sli.mbox_active;
12756 pmbox = &pmb->u.mb;
12757 mbox = phba->mbox;
12758 vport = pmb->vport;
12759
12760 /* First check out the status word */
12761 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12762 if (pmbox->mbxOwner != OWN_HOST) {
12763 spin_unlock_irqrestore(&phba->hbalock, iflag);
12764 /*
12765 * Stray Mailbox Interrupt, mbxCommand <cmd>
12766 * mbxStatus <status>
12767 */
12768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12769 "(%d):0304 Stray Mailbox "
12770 "Interrupt mbxCommand x%x "
12771 "mbxStatus x%x\n",
12772 (vport ? vport->vpi : 0),
12773 pmbox->mbxCommand,
12774 pmbox->mbxStatus);
12775 /* clear mailbox attention bit */
12776 work_ha_copy &= ~HA_MBATT;
12777 } else {
12778 phba->sli.mbox_active = NULL;
12779 spin_unlock_irqrestore(&phba->hbalock, iflag);
12780 phba->last_completion_time = jiffies;
12781 del_timer(&phba->sli.mbox_tmo);
12782 if (pmb->mbox_cmpl) {
12783 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12784 MAILBOX_CMD_SIZE);
12785 if (pmb->out_ext_byte_len &&
12786 pmb->ctx_buf)
12787 lpfc_sli_pcimem_bcopy(
12788 phba->mbox_ext,
12789 pmb->ctx_buf,
12790 pmb->out_ext_byte_len);
12791 }
12792 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12793 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12794
12795 lpfc_debugfs_disc_trc(vport,
12796 LPFC_DISC_TRC_MBOX_VPORT,
12797 "MBOX dflt rpi: : "
12798 "status:x%x rpi:x%x",
12799 (uint32_t)pmbox->mbxStatus,
12800 pmbox->un.varWords[0], 0);
12801
12802 if (!pmbox->mbxStatus) {
12803 mp = (struct lpfc_dmabuf *)
12804 (pmb->ctx_buf);
12805 ndlp = (struct lpfc_nodelist *)
12806 pmb->ctx_ndlp;
12807
12808 /* Reg_LOGIN of dflt RPI was
12809 * successful. new lets get
12810 * rid of the RPI using the
12811 * same mbox buffer.
12812 */
12813 lpfc_unreg_login(phba,
12814 vport->vpi,
12815 pmbox->un.varWords[0],
12816 pmb);
12817 pmb->mbox_cmpl =
12818 lpfc_mbx_cmpl_dflt_rpi;
12819 pmb->ctx_buf = mp;
12820 pmb->ctx_ndlp = ndlp;
12821 pmb->vport = vport;
12822 rc = lpfc_sli_issue_mbox(phba,
12823 pmb,
12824 MBX_NOWAIT);
12825 if (rc != MBX_BUSY)
12826 lpfc_printf_log(phba,
12827 KERN_ERR,
12828 LOG_TRACE_EVENT,
12829 "0350 rc should have"
12830 "been MBX_BUSY\n");
12831 if (rc != MBX_NOT_FINISHED)
12832 goto send_current_mbox;
12833 }
12834 }
12835 spin_lock_irqsave(
12836 &phba->pport->work_port_lock,
12837 iflag);
12838 phba->pport->work_port_events &=
12839 ~WORKER_MBOX_TMO;
12840 spin_unlock_irqrestore(
12841 &phba->pport->work_port_lock,
12842 iflag);
12843 lpfc_mbox_cmpl_put(phba, pmb);
12844 }
12845 } else
12846 spin_unlock_irqrestore(&phba->hbalock, iflag);
12847
12848 if ((work_ha_copy & HA_MBATT) &&
12849 (phba->sli.mbox_active == NULL)) {
12850 send_current_mbox:
12851 /* Process next mailbox command if there is one */
12852 do {
12853 rc = lpfc_sli_issue_mbox(phba, NULL,
12854 MBX_NOWAIT);
12855 } while (rc == MBX_NOT_FINISHED);
12856 if (rc != MBX_SUCCESS)
12857 lpfc_printf_log(phba, KERN_ERR,
12858 LOG_TRACE_EVENT,
12859 "0349 rc should be "
12860 "MBX_SUCCESS\n");
12861 }
12862
12863 spin_lock_irqsave(&phba->hbalock, iflag);
12864 phba->work_ha |= work_ha_copy;
12865 spin_unlock_irqrestore(&phba->hbalock, iflag);
12866 lpfc_worker_wake_up(phba);
12867 }
12868 return IRQ_HANDLED;
12869 unplug_error:
12870 spin_unlock_irqrestore(&phba->hbalock, iflag);
12871 return IRQ_HANDLED;
12872
12873 } /* lpfc_sli_sp_intr_handler */
12874
12875 /**
12876 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12877 * @irq: Interrupt number.
12878 * @dev_id: The device context pointer.
12879 *
12880 * This function is directly called from the PCI layer as an interrupt
12881 * service routine when device with SLI-3 interface spec is enabled with
12882 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12883 * ring event in the HBA. However, when the device is enabled with either
12884 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12885 * device-level interrupt handler. When the PCI slot is in error recovery
12886 * or the HBA is undergoing initialization, the interrupt handler will not
12887 * process the interrupt. The SCSI FCP fast-path ring event are handled in
12888 * the intrrupt context. This function is called without any lock held.
12889 * It gets the hbalock to access and update SLI data structures.
12890 *
12891 * This function returns IRQ_HANDLED when interrupt is handled else it
12892 * returns IRQ_NONE.
12893 **/
12894 irqreturn_t
lpfc_sli_fp_intr_handler(int irq,void * dev_id)12895 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12896 {
12897 struct lpfc_hba *phba;
12898 uint32_t ha_copy;
12899 unsigned long status;
12900 unsigned long iflag;
12901 struct lpfc_sli_ring *pring;
12902
12903 /* Get the driver's phba structure from the dev_id and
12904 * assume the HBA is not interrupting.
12905 */
12906 phba = (struct lpfc_hba *) dev_id;
12907
12908 if (unlikely(!phba))
12909 return IRQ_NONE;
12910
12911 /*
12912 * Stuff needs to be attented to when this function is invoked as an
12913 * individual interrupt handler in MSI-X multi-message interrupt mode
12914 */
12915 if (phba->intr_type == MSIX) {
12916 /* Check device state for handling interrupt */
12917 if (lpfc_intr_state_check(phba))
12918 return IRQ_NONE;
12919 /* Need to read HA REG for FCP ring and other ring events */
12920 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12921 return IRQ_HANDLED;
12922 /* Clear up only attention source related to fast-path */
12923 spin_lock_irqsave(&phba->hbalock, iflag);
12924 /*
12925 * If there is deferred error attention, do not check for
12926 * any interrupt.
12927 */
12928 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12929 spin_unlock_irqrestore(&phba->hbalock, iflag);
12930 return IRQ_NONE;
12931 }
12932 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12933 phba->HAregaddr);
12934 readl(phba->HAregaddr); /* flush */
12935 spin_unlock_irqrestore(&phba->hbalock, iflag);
12936 } else
12937 ha_copy = phba->ha_copy;
12938
12939 /*
12940 * Process all events on FCP ring. Take the optimized path for FCP IO.
12941 */
12942 ha_copy &= ~(phba->work_ha_mask);
12943
12944 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12945 status >>= (4*LPFC_FCP_RING);
12946 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12947 if (status & HA_RXMASK)
12948 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12949
12950 if (phba->cfg_multi_ring_support == 2) {
12951 /*
12952 * Process all events on extra ring. Take the optimized path
12953 * for extra ring IO.
12954 */
12955 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12956 status >>= (4*LPFC_EXTRA_RING);
12957 if (status & HA_RXMASK) {
12958 lpfc_sli_handle_fast_ring_event(phba,
12959 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12960 status);
12961 }
12962 }
12963 return IRQ_HANDLED;
12964 } /* lpfc_sli_fp_intr_handler */
12965
12966 /**
12967 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12968 * @irq: Interrupt number.
12969 * @dev_id: The device context pointer.
12970 *
12971 * This function is the HBA device-level interrupt handler to device with
12972 * SLI-3 interface spec, called from the PCI layer when either MSI or
12973 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12974 * requires driver attention. This function invokes the slow-path interrupt
12975 * attention handling function and fast-path interrupt attention handling
12976 * function in turn to process the relevant HBA attention events. This
12977 * function is called without any lock held. It gets the hbalock to access
12978 * and update SLI data structures.
12979 *
12980 * This function returns IRQ_HANDLED when interrupt is handled, else it
12981 * returns IRQ_NONE.
12982 **/
12983 irqreturn_t
lpfc_sli_intr_handler(int irq,void * dev_id)12984 lpfc_sli_intr_handler(int irq, void *dev_id)
12985 {
12986 struct lpfc_hba *phba;
12987 irqreturn_t sp_irq_rc, fp_irq_rc;
12988 unsigned long status1, status2;
12989 uint32_t hc_copy;
12990
12991 /*
12992 * Get the driver's phba structure from the dev_id and
12993 * assume the HBA is not interrupting.
12994 */
12995 phba = (struct lpfc_hba *) dev_id;
12996
12997 if (unlikely(!phba))
12998 return IRQ_NONE;
12999
13000 /* Check device state for handling interrupt */
13001 if (lpfc_intr_state_check(phba))
13002 return IRQ_NONE;
13003
13004 spin_lock(&phba->hbalock);
13005 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13006 spin_unlock(&phba->hbalock);
13007 return IRQ_HANDLED;
13008 }
13009
13010 if (unlikely(!phba->ha_copy)) {
13011 spin_unlock(&phba->hbalock);
13012 return IRQ_NONE;
13013 } else if (phba->ha_copy & HA_ERATT) {
13014 if (phba->hba_flag & HBA_ERATT_HANDLED)
13015 /* ERATT polling has handled ERATT */
13016 phba->ha_copy &= ~HA_ERATT;
13017 else
13018 /* Indicate interrupt handler handles ERATT */
13019 phba->hba_flag |= HBA_ERATT_HANDLED;
13020 }
13021
13022 /*
13023 * If there is deferred error attention, do not check for any interrupt.
13024 */
13025 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13026 spin_unlock(&phba->hbalock);
13027 return IRQ_NONE;
13028 }
13029
13030 /* Clear attention sources except link and error attentions */
13031 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13032 spin_unlock(&phba->hbalock);
13033 return IRQ_HANDLED;
13034 }
13035 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13036 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13037 phba->HCregaddr);
13038 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13039 writel(hc_copy, phba->HCregaddr);
13040 readl(phba->HAregaddr); /* flush */
13041 spin_unlock(&phba->hbalock);
13042
13043 /*
13044 * Invokes slow-path host attention interrupt handling as appropriate.
13045 */
13046
13047 /* status of events with mailbox and link attention */
13048 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13049
13050 /* status of events with ELS ring */
13051 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13052 status2 >>= (4*LPFC_ELS_RING);
13053
13054 if (status1 || (status2 & HA_RXMASK))
13055 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13056 else
13057 sp_irq_rc = IRQ_NONE;
13058
13059 /*
13060 * Invoke fast-path host attention interrupt handling as appropriate.
13061 */
13062
13063 /* status of events with FCP ring */
13064 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13065 status1 >>= (4*LPFC_FCP_RING);
13066
13067 /* status of events with extra ring */
13068 if (phba->cfg_multi_ring_support == 2) {
13069 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13070 status2 >>= (4*LPFC_EXTRA_RING);
13071 } else
13072 status2 = 0;
13073
13074 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13075 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13076 else
13077 fp_irq_rc = IRQ_NONE;
13078
13079 /* Return device-level interrupt handling status */
13080 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13081 } /* lpfc_sli_intr_handler */
13082
13083 /**
13084 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
13085 * @phba: pointer to lpfc hba data structure.
13086 *
13087 * This routine is invoked by the worker thread to process all the pending
13088 * SLI4 els abort xri events.
13089 **/
lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba * phba)13090 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13091 {
13092 struct lpfc_cq_event *cq_event;
13093 unsigned long iflags;
13094
13095 /* First, declare the els xri abort event has been handled */
13096 spin_lock_irqsave(&phba->hbalock, iflags);
13097 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13098 spin_unlock_irqrestore(&phba->hbalock, iflags);
13099
13100 /* Now, handle all the els xri abort events */
13101 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13102 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13103 /* Get the first event from the head of the event queue */
13104 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13105 cq_event, struct lpfc_cq_event, list);
13106 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13107 iflags);
13108 /* Notify aborted XRI for ELS work queue */
13109 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13110
13111 /* Free the event processed back to the free pool */
13112 lpfc_sli4_cq_event_release(phba, cq_event);
13113 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13114 iflags);
13115 }
13116 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13117 }
13118
13119 /**
13120 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
13121 * @phba: pointer to lpfc hba data structure
13122 * @pIocbIn: pointer to the rspiocbq
13123 * @pIocbOut: pointer to the cmdiocbq
13124 * @wcqe: pointer to the complete wcqe
13125 *
13126 * This routine transfers the fields of a command iocbq to a response iocbq
13127 * by copying all the IOCB fields from command iocbq and transferring the
13128 * completion status information from the complete wcqe.
13129 **/
13130 static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba * phba,struct lpfc_iocbq * pIocbIn,struct lpfc_iocbq * pIocbOut,struct lpfc_wcqe_complete * wcqe)13131 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13132 struct lpfc_iocbq *pIocbIn,
13133 struct lpfc_iocbq *pIocbOut,
13134 struct lpfc_wcqe_complete *wcqe)
13135 {
13136 int numBdes, i;
13137 unsigned long iflags;
13138 uint32_t status, max_response;
13139 struct lpfc_dmabuf *dmabuf;
13140 struct ulp_bde64 *bpl, bde;
13141 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13142
13143 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13144 sizeof(struct lpfc_iocbq) - offset);
13145 /* Map WCQE parameters into irspiocb parameters */
13146 status = bf_get(lpfc_wcqe_c_status, wcqe);
13147 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13148 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13149 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13150 pIocbIn->iocb.un.fcpi.fcpi_parm =
13151 pIocbOut->iocb.un.fcpi.fcpi_parm -
13152 wcqe->total_data_placed;
13153 else
13154 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13155 else {
13156 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13157 switch (pIocbOut->iocb.ulpCommand) {
13158 case CMD_ELS_REQUEST64_CR:
13159 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13160 bpl = (struct ulp_bde64 *)dmabuf->virt;
13161 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13162 max_response = bde.tus.f.bdeSize;
13163 break;
13164 case CMD_GEN_REQUEST64_CR:
13165 max_response = 0;
13166 if (!pIocbOut->context3)
13167 break;
13168 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13169 sizeof(struct ulp_bde64);
13170 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13171 bpl = (struct ulp_bde64 *)dmabuf->virt;
13172 for (i = 0; i < numBdes; i++) {
13173 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13174 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13175 max_response += bde.tus.f.bdeSize;
13176 }
13177 break;
13178 default:
13179 max_response = wcqe->total_data_placed;
13180 break;
13181 }
13182 if (max_response < wcqe->total_data_placed)
13183 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13184 else
13185 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13186 wcqe->total_data_placed;
13187 }
13188
13189 /* Convert BG errors for completion status */
13190 if (status == CQE_STATUS_DI_ERROR) {
13191 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13192
13193 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13194 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13195 else
13196 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13197
13198 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13199 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13200 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13201 BGS_GUARD_ERR_MASK;
13202 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13203 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13204 BGS_APPTAG_ERR_MASK;
13205 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13206 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13207 BGS_REFTAG_ERR_MASK;
13208
13209 /* Check to see if there was any good data before the error */
13210 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13211 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13212 BGS_HI_WATER_MARK_PRESENT_MASK;
13213 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13214 wcqe->total_data_placed;
13215 }
13216
13217 /*
13218 * Set ALL the error bits to indicate we don't know what
13219 * type of error it is.
13220 */
13221 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13222 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13223 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13224 BGS_GUARD_ERR_MASK);
13225 }
13226
13227 /* Pick up HBA exchange busy condition */
13228 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13229 spin_lock_irqsave(&phba->hbalock, iflags);
13230 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13231 spin_unlock_irqrestore(&phba->hbalock, iflags);
13232 }
13233 }
13234
13235 /**
13236 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13237 * @phba: Pointer to HBA context object.
13238 * @irspiocbq: Pointer to work-queue completion queue entry.
13239 *
13240 * This routine handles an ELS work-queue completion event and construct
13241 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
13242 * discovery engine to handle.
13243 *
13244 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13245 **/
13246 static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba * phba,struct lpfc_iocbq * irspiocbq)13247 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13248 struct lpfc_iocbq *irspiocbq)
13249 {
13250 struct lpfc_sli_ring *pring;
13251 struct lpfc_iocbq *cmdiocbq;
13252 struct lpfc_wcqe_complete *wcqe;
13253 unsigned long iflags;
13254
13255 pring = lpfc_phba_elsring(phba);
13256 if (unlikely(!pring))
13257 return NULL;
13258
13259 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13260 pring->stats.iocb_event++;
13261 /* Look up the ELS command IOCB and create pseudo response IOCB */
13262 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13263 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13264 if (unlikely(!cmdiocbq)) {
13265 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13266 "0386 ELS complete with no corresponding "
13267 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13268 wcqe->word0, wcqe->total_data_placed,
13269 wcqe->parameter, wcqe->word3);
13270 lpfc_sli_release_iocbq(phba, irspiocbq);
13271 return NULL;
13272 }
13273
13274 spin_lock_irqsave(&pring->ring_lock, iflags);
13275 /* Put the iocb back on the txcmplq */
13276 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13277 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13278
13279 /* Fake the irspiocbq and copy necessary response information */
13280 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13281
13282 return irspiocbq;
13283 }
13284
13285 inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba * phba,void * entry,int size)13286 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13287 {
13288 struct lpfc_cq_event *cq_event;
13289
13290 /* Allocate a new internal CQ_EVENT entry */
13291 cq_event = lpfc_sli4_cq_event_alloc(phba);
13292 if (!cq_event) {
13293 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13294 "0602 Failed to alloc CQ_EVENT entry\n");
13295 return NULL;
13296 }
13297
13298 /* Move the CQE into the event */
13299 memcpy(&cq_event->cqe, entry, size);
13300 return cq_event;
13301 }
13302
13303 /**
13304 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13305 * @phba: Pointer to HBA context object.
13306 * @mcqe: Pointer to mailbox completion queue entry.
13307 *
13308 * This routine process a mailbox completion queue entry with asynchronous
13309 * event.
13310 *
13311 * Return: true if work posted to worker thread, otherwise false.
13312 **/
13313 static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba * phba,struct lpfc_mcqe * mcqe)13314 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13315 {
13316 struct lpfc_cq_event *cq_event;
13317 unsigned long iflags;
13318
13319 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13320 "0392 Async Event: word0:x%x, word1:x%x, "
13321 "word2:x%x, word3:x%x\n", mcqe->word0,
13322 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13323
13324 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13325 if (!cq_event)
13326 return false;
13327
13328 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13329 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13330 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13331
13332 /* Set the async event flag */
13333 spin_lock_irqsave(&phba->hbalock, iflags);
13334 phba->hba_flag |= ASYNC_EVENT;
13335 spin_unlock_irqrestore(&phba->hbalock, iflags);
13336
13337 return true;
13338 }
13339
13340 /**
13341 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13342 * @phba: Pointer to HBA context object.
13343 * @mcqe: Pointer to mailbox completion queue entry.
13344 *
13345 * This routine process a mailbox completion queue entry with mailbox
13346 * completion event.
13347 *
13348 * Return: true if work posted to worker thread, otherwise false.
13349 **/
13350 static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba * phba,struct lpfc_mcqe * mcqe)13351 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13352 {
13353 uint32_t mcqe_status;
13354 MAILBOX_t *mbox, *pmbox;
13355 struct lpfc_mqe *mqe;
13356 struct lpfc_vport *vport;
13357 struct lpfc_nodelist *ndlp;
13358 struct lpfc_dmabuf *mp;
13359 unsigned long iflags;
13360 LPFC_MBOXQ_t *pmb;
13361 bool workposted = false;
13362 int rc;
13363
13364 /* If not a mailbox complete MCQE, out by checking mailbox consume */
13365 if (!bf_get(lpfc_trailer_completed, mcqe))
13366 goto out_no_mqe_complete;
13367
13368 /* Get the reference to the active mbox command */
13369 spin_lock_irqsave(&phba->hbalock, iflags);
13370 pmb = phba->sli.mbox_active;
13371 if (unlikely(!pmb)) {
13372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13373 "1832 No pending MBOX command to handle\n");
13374 spin_unlock_irqrestore(&phba->hbalock, iflags);
13375 goto out_no_mqe_complete;
13376 }
13377 spin_unlock_irqrestore(&phba->hbalock, iflags);
13378 mqe = &pmb->u.mqe;
13379 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13380 mbox = phba->mbox;
13381 vport = pmb->vport;
13382
13383 /* Reset heartbeat timer */
13384 phba->last_completion_time = jiffies;
13385 del_timer(&phba->sli.mbox_tmo);
13386
13387 /* Move mbox data to caller's mailbox region, do endian swapping */
13388 if (pmb->mbox_cmpl && mbox)
13389 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13390
13391 /*
13392 * For mcqe errors, conditionally move a modified error code to
13393 * the mbox so that the error will not be missed.
13394 */
13395 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13396 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13397 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13398 bf_set(lpfc_mqe_status, mqe,
13399 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13400 }
13401 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13402 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13403 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13404 "MBOX dflt rpi: status:x%x rpi:x%x",
13405 mcqe_status,
13406 pmbox->un.varWords[0], 0);
13407 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13408 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13409 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13410 /* Reg_LOGIN of dflt RPI was successful. Now lets get
13411 * RID of the PPI using the same mbox buffer.
13412 */
13413 lpfc_unreg_login(phba, vport->vpi,
13414 pmbox->un.varWords[0], pmb);
13415 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13416 pmb->ctx_buf = mp;
13417 pmb->ctx_ndlp = ndlp;
13418 pmb->vport = vport;
13419 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13420 if (rc != MBX_BUSY)
13421 lpfc_printf_log(phba, KERN_ERR,
13422 LOG_TRACE_EVENT,
13423 "0385 rc should "
13424 "have been MBX_BUSY\n");
13425 if (rc != MBX_NOT_FINISHED)
13426 goto send_current_mbox;
13427 }
13428 }
13429 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13430 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13431 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13432
13433 /* There is mailbox completion work to do */
13434 spin_lock_irqsave(&phba->hbalock, iflags);
13435 __lpfc_mbox_cmpl_put(phba, pmb);
13436 phba->work_ha |= HA_MBATT;
13437 spin_unlock_irqrestore(&phba->hbalock, iflags);
13438 workposted = true;
13439
13440 send_current_mbox:
13441 spin_lock_irqsave(&phba->hbalock, iflags);
13442 /* Release the mailbox command posting token */
13443 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13444 /* Setting active mailbox pointer need to be in sync to flag clear */
13445 phba->sli.mbox_active = NULL;
13446 if (bf_get(lpfc_trailer_consumed, mcqe))
13447 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13448 spin_unlock_irqrestore(&phba->hbalock, iflags);
13449 /* Wake up worker thread to post the next pending mailbox command */
13450 lpfc_worker_wake_up(phba);
13451 return workposted;
13452
13453 out_no_mqe_complete:
13454 spin_lock_irqsave(&phba->hbalock, iflags);
13455 if (bf_get(lpfc_trailer_consumed, mcqe))
13456 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13457 spin_unlock_irqrestore(&phba->hbalock, iflags);
13458 return false;
13459 }
13460
13461 /**
13462 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
13463 * @phba: Pointer to HBA context object.
13464 * @cq: Pointer to associated CQ
13465 * @cqe: Pointer to mailbox completion queue entry.
13466 *
13467 * This routine process a mailbox completion queue entry, it invokes the
13468 * proper mailbox complete handling or asynchronous event handling routine
13469 * according to the MCQE's async bit.
13470 *
13471 * Return: true if work posted to worker thread, otherwise false.
13472 **/
13473 static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_cqe * cqe)13474 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13475 struct lpfc_cqe *cqe)
13476 {
13477 struct lpfc_mcqe mcqe;
13478 bool workposted;
13479
13480 cq->CQ_mbox++;
13481
13482 /* Copy the mailbox MCQE and convert endian order as needed */
13483 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13484
13485 /* Invoke the proper event handling routine */
13486 if (!bf_get(lpfc_trailer_async, &mcqe))
13487 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13488 else
13489 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13490 return workposted;
13491 }
13492
13493 /**
13494 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
13495 * @phba: Pointer to HBA context object.
13496 * @cq: Pointer to associated CQ
13497 * @wcqe: Pointer to work-queue completion queue entry.
13498 *
13499 * This routine handles an ELS work-queue completion event.
13500 *
13501 * Return: true if work posted to worker thread, otherwise false.
13502 **/
13503 static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_wcqe_complete * wcqe)13504 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13505 struct lpfc_wcqe_complete *wcqe)
13506 {
13507 struct lpfc_iocbq *irspiocbq;
13508 unsigned long iflags;
13509 struct lpfc_sli_ring *pring = cq->pring;
13510 int txq_cnt = 0;
13511 int txcmplq_cnt = 0;
13512
13513 /* Check for response status */
13514 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13515 /* Log the error status */
13516 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13517 "0357 ELS CQE error: status=x%x: "
13518 "CQE: %08x %08x %08x %08x\n",
13519 bf_get(lpfc_wcqe_c_status, wcqe),
13520 wcqe->word0, wcqe->total_data_placed,
13521 wcqe->parameter, wcqe->word3);
13522 }
13523
13524 /* Get an irspiocbq for later ELS response processing use */
13525 irspiocbq = lpfc_sli_get_iocbq(phba);
13526 if (!irspiocbq) {
13527 if (!list_empty(&pring->txq))
13528 txq_cnt++;
13529 if (!list_empty(&pring->txcmplq))
13530 txcmplq_cnt++;
13531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13532 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13533 "els_txcmplq_cnt=%d\n",
13534 txq_cnt, phba->iocb_cnt,
13535 txcmplq_cnt);
13536 return false;
13537 }
13538
13539 /* Save off the slow-path queue event for work thread to process */
13540 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13541 spin_lock_irqsave(&phba->hbalock, iflags);
13542 list_add_tail(&irspiocbq->cq_event.list,
13543 &phba->sli4_hba.sp_queue_event);
13544 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13545 spin_unlock_irqrestore(&phba->hbalock, iflags);
13546
13547 return true;
13548 }
13549
13550 /**
13551 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
13552 * @phba: Pointer to HBA context object.
13553 * @wcqe: Pointer to work-queue completion queue entry.
13554 *
13555 * This routine handles slow-path WQ entry consumed event by invoking the
13556 * proper WQ release routine to the slow-path WQ.
13557 **/
13558 static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba * phba,struct lpfc_wcqe_release * wcqe)13559 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13560 struct lpfc_wcqe_release *wcqe)
13561 {
13562 /* sanity check on queue memory */
13563 if (unlikely(!phba->sli4_hba.els_wq))
13564 return;
13565 /* Check for the slow-path ELS work queue */
13566 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13567 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13568 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13569 else
13570 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13571 "2579 Slow-path wqe consume event carries "
13572 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13573 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13574 phba->sli4_hba.els_wq->queue_id);
13575 }
13576
13577 /**
13578 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
13579 * @phba: Pointer to HBA context object.
13580 * @cq: Pointer to a WQ completion queue.
13581 * @wcqe: Pointer to work-queue completion queue entry.
13582 *
13583 * This routine handles an XRI abort event.
13584 *
13585 * Return: true if work posted to worker thread, otherwise false.
13586 **/
13587 static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct sli4_wcqe_xri_aborted * wcqe)13588 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13589 struct lpfc_queue *cq,
13590 struct sli4_wcqe_xri_aborted *wcqe)
13591 {
13592 bool workposted = false;
13593 struct lpfc_cq_event *cq_event;
13594 unsigned long iflags;
13595
13596 switch (cq->subtype) {
13597 case LPFC_IO:
13598 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13599 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13600 /* Notify aborted XRI for NVME work queue */
13601 if (phba->nvmet_support)
13602 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13603 }
13604 workposted = false;
13605 break;
13606 case LPFC_NVME_LS: /* NVME LS uses ELS resources */
13607 case LPFC_ELS:
13608 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13609 if (!cq_event) {
13610 workposted = false;
13611 break;
13612 }
13613 cq_event->hdwq = cq->hdwq;
13614 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13615 iflags);
13616 list_add_tail(&cq_event->list,
13617 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13618 /* Set the els xri abort event flag */
13619 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13620 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13621 iflags);
13622 workposted = true;
13623 break;
13624 default:
13625 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13626 "0603 Invalid CQ subtype %d: "
13627 "%08x %08x %08x %08x\n",
13628 cq->subtype, wcqe->word0, wcqe->parameter,
13629 wcqe->word2, wcqe->word3);
13630 workposted = false;
13631 break;
13632 }
13633 return workposted;
13634 }
13635
13636 #define FC_RCTL_MDS_DIAGS 0xF4
13637
13638 /**
13639 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
13640 * @phba: Pointer to HBA context object.
13641 * @rcqe: Pointer to receive-queue completion queue entry.
13642 *
13643 * This routine process a receive-queue completion queue entry.
13644 *
13645 * Return: true if work posted to worker thread, otherwise false.
13646 **/
13647 static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba * phba,struct lpfc_rcqe * rcqe)13648 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13649 {
13650 bool workposted = false;
13651 struct fc_frame_header *fc_hdr;
13652 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13653 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13654 struct lpfc_nvmet_tgtport *tgtp;
13655 struct hbq_dmabuf *dma_buf;
13656 uint32_t status, rq_id;
13657 unsigned long iflags;
13658
13659 /* sanity check on queue memory */
13660 if (unlikely(!hrq) || unlikely(!drq))
13661 return workposted;
13662
13663 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13664 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13665 else
13666 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13667 if (rq_id != hrq->queue_id)
13668 goto out;
13669
13670 status = bf_get(lpfc_rcqe_status, rcqe);
13671 switch (status) {
13672 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13674 "2537 Receive Frame Truncated!!\n");
13675 fallthrough;
13676 case FC_STATUS_RQ_SUCCESS:
13677 spin_lock_irqsave(&phba->hbalock, iflags);
13678 lpfc_sli4_rq_release(hrq, drq);
13679 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13680 if (!dma_buf) {
13681 hrq->RQ_no_buf_found++;
13682 spin_unlock_irqrestore(&phba->hbalock, iflags);
13683 goto out;
13684 }
13685 hrq->RQ_rcv_buf++;
13686 hrq->RQ_buf_posted--;
13687 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13688
13689 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13690
13691 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13692 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13693 spin_unlock_irqrestore(&phba->hbalock, iflags);
13694 /* Handle MDS Loopback frames */
13695 if (!(phba->pport->load_flag & FC_UNLOADING))
13696 lpfc_sli4_handle_mds_loopback(phba->pport,
13697 dma_buf);
13698 else
13699 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13700 break;
13701 }
13702
13703 /* save off the frame for the work thread to process */
13704 list_add_tail(&dma_buf->cq_event.list,
13705 &phba->sli4_hba.sp_queue_event);
13706 /* Frame received */
13707 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13708 spin_unlock_irqrestore(&phba->hbalock, iflags);
13709 workposted = true;
13710 break;
13711 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13712 if (phba->nvmet_support) {
13713 tgtp = phba->targetport->private;
13714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13715 "6402 RQE Error x%x, posted %d err_cnt "
13716 "%d: %x %x %x\n",
13717 status, hrq->RQ_buf_posted,
13718 hrq->RQ_no_posted_buf,
13719 atomic_read(&tgtp->rcv_fcp_cmd_in),
13720 atomic_read(&tgtp->rcv_fcp_cmd_out),
13721 atomic_read(&tgtp->xmt_fcp_release));
13722 }
13723 fallthrough;
13724
13725 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13726 hrq->RQ_no_posted_buf++;
13727 /* Post more buffers if possible */
13728 spin_lock_irqsave(&phba->hbalock, iflags);
13729 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13730 spin_unlock_irqrestore(&phba->hbalock, iflags);
13731 workposted = true;
13732 break;
13733 }
13734 out:
13735 return workposted;
13736 }
13737
13738 /**
13739 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13740 * @phba: Pointer to HBA context object.
13741 * @cq: Pointer to the completion queue.
13742 * @cqe: Pointer to a completion queue entry.
13743 *
13744 * This routine process a slow-path work-queue or receive queue completion queue
13745 * entry.
13746 *
13747 * Return: true if work posted to worker thread, otherwise false.
13748 **/
13749 static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_cqe * cqe)13750 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13751 struct lpfc_cqe *cqe)
13752 {
13753 struct lpfc_cqe cqevt;
13754 bool workposted = false;
13755
13756 /* Copy the work queue CQE and convert endian order if needed */
13757 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13758
13759 /* Check and process for different type of WCQE and dispatch */
13760 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13761 case CQE_CODE_COMPL_WQE:
13762 /* Process the WQ/RQ complete event */
13763 phba->last_completion_time = jiffies;
13764 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13765 (struct lpfc_wcqe_complete *)&cqevt);
13766 break;
13767 case CQE_CODE_RELEASE_WQE:
13768 /* Process the WQ release event */
13769 lpfc_sli4_sp_handle_rel_wcqe(phba,
13770 (struct lpfc_wcqe_release *)&cqevt);
13771 break;
13772 case CQE_CODE_XRI_ABORTED:
13773 /* Process the WQ XRI abort event */
13774 phba->last_completion_time = jiffies;
13775 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13776 (struct sli4_wcqe_xri_aborted *)&cqevt);
13777 break;
13778 case CQE_CODE_RECEIVE:
13779 case CQE_CODE_RECEIVE_V1:
13780 /* Process the RQ event */
13781 phba->last_completion_time = jiffies;
13782 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13783 (struct lpfc_rcqe *)&cqevt);
13784 break;
13785 default:
13786 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13787 "0388 Not a valid WCQE code: x%x\n",
13788 bf_get(lpfc_cqe_code, &cqevt));
13789 break;
13790 }
13791 return workposted;
13792 }
13793
13794 /**
13795 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13796 * @phba: Pointer to HBA context object.
13797 * @eqe: Pointer to fast-path event queue entry.
13798 * @speq: Pointer to slow-path event queue.
13799 *
13800 * This routine process a event queue entry from the slow-path event queue.
13801 * It will check the MajorCode and MinorCode to determine this is for a
13802 * completion event on a completion queue, if not, an error shall be logged
13803 * and just return. Otherwise, it will get to the corresponding completion
13804 * queue and process all the entries on that completion queue, rearm the
13805 * completion queue, and then return.
13806 *
13807 **/
13808 static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba * phba,struct lpfc_eqe * eqe,struct lpfc_queue * speq)13809 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13810 struct lpfc_queue *speq)
13811 {
13812 struct lpfc_queue *cq = NULL, *childq;
13813 uint16_t cqid;
13814 int ret = 0;
13815
13816 /* Get the reference to the corresponding CQ */
13817 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13818
13819 list_for_each_entry(childq, &speq->child_list, list) {
13820 if (childq->queue_id == cqid) {
13821 cq = childq;
13822 break;
13823 }
13824 }
13825 if (unlikely(!cq)) {
13826 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13828 "0365 Slow-path CQ identifier "
13829 "(%d) does not exist\n", cqid);
13830 return;
13831 }
13832
13833 /* Save EQ associated with this CQ */
13834 cq->assoc_qp = speq;
13835
13836 if (is_kdump_kernel())
13837 ret = queue_work(phba->wq, &cq->spwork);
13838 else
13839 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
13840
13841 if (!ret)
13842 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13843 "0390 Cannot schedule queue work "
13844 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13845 cqid, cq->queue_id, raw_smp_processor_id());
13846 }
13847
13848 /**
13849 * __lpfc_sli4_process_cq - Process elements of a CQ
13850 * @phba: Pointer to HBA context object.
13851 * @cq: Pointer to CQ to be processed
13852 * @handler: Routine to process each cqe
13853 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
13854 * @poll_mode: Polling mode we were called from
13855 *
13856 * This routine processes completion queue entries in a CQ. While a valid
13857 * queue element is found, the handler is called. During processing checks
13858 * are made for periodic doorbell writes to let the hardware know of
13859 * element consumption.
13860 *
13861 * If the max limit on cqes to process is hit, or there are no more valid
13862 * entries, the loop stops. If we processed a sufficient number of elements,
13863 * meaning there is sufficient load, rather than rearming and generating
13864 * another interrupt, a cq rescheduling delay will be set. A delay of 0
13865 * indicates no rescheduling.
13866 *
13867 * Returns True if work scheduled, False otherwise.
13868 **/
13869 static bool
__lpfc_sli4_process_cq(struct lpfc_hba * phba,struct lpfc_queue * cq,bool (* handler)(struct lpfc_hba *,struct lpfc_queue *,struct lpfc_cqe *),unsigned long * delay,enum lpfc_poll_mode poll_mode)13870 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13871 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13872 struct lpfc_cqe *), unsigned long *delay,
13873 enum lpfc_poll_mode poll_mode)
13874 {
13875 struct lpfc_cqe *cqe;
13876 bool workposted = false;
13877 int count = 0, consumed = 0;
13878 bool arm = true;
13879
13880 /* default - no reschedule */
13881 *delay = 0;
13882
13883 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13884 goto rearm_and_exit;
13885
13886 /* Process all the entries to the CQ */
13887 cq->q_flag = 0;
13888 cqe = lpfc_sli4_cq_get(cq);
13889 while (cqe) {
13890 workposted |= handler(phba, cq, cqe);
13891 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13892
13893 consumed++;
13894 if (!(++count % cq->max_proc_limit))
13895 break;
13896
13897 if (!(count % cq->notify_interval)) {
13898 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13899 LPFC_QUEUE_NOARM);
13900 consumed = 0;
13901 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13902 }
13903
13904 if (count == LPFC_NVMET_CQ_NOTIFY)
13905 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13906
13907 cqe = lpfc_sli4_cq_get(cq);
13908 }
13909 if (count >= phba->cfg_cq_poll_threshold) {
13910 *delay = 1;
13911 arm = false;
13912 }
13913
13914 /* Note: complete the irq_poll softirq before rearming CQ */
13915 if (poll_mode == LPFC_IRQ_POLL)
13916 irq_poll_complete(&cq->iop);
13917
13918 /* Track the max number of CQEs processed in 1 EQ */
13919 if (count > cq->CQ_max_cqe)
13920 cq->CQ_max_cqe = count;
13921
13922 cq->assoc_qp->EQ_cqe_cnt += count;
13923
13924 /* Catch the no cq entry condition */
13925 if (unlikely(count == 0))
13926 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13927 "0369 No entry from completion queue "
13928 "qid=%d\n", cq->queue_id);
13929
13930 xchg(&cq->queue_claimed, 0);
13931
13932 rearm_and_exit:
13933 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13934 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13935
13936 return workposted;
13937 }
13938
13939 /**
13940 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
13941 * @cq: pointer to CQ to process
13942 *
13943 * This routine calls the cq processing routine with a handler specific
13944 * to the type of queue bound to it.
13945 *
13946 * The CQ routine returns two values: the first is the calling status,
13947 * which indicates whether work was queued to the background discovery
13948 * thread. If true, the routine should wakeup the discovery thread;
13949 * the second is the delay parameter. If non-zero, rather than rearming
13950 * the CQ and yet another interrupt, the CQ handler should be queued so
13951 * that it is processed in a subsequent polling action. The value of
13952 * the delay indicates when to reschedule it.
13953 **/
13954 static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue * cq)13955 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13956 {
13957 struct lpfc_hba *phba = cq->phba;
13958 unsigned long delay;
13959 bool workposted = false;
13960 int ret = 0;
13961
13962 /* Process and rearm the CQ */
13963 switch (cq->type) {
13964 case LPFC_MCQ:
13965 workposted |= __lpfc_sli4_process_cq(phba, cq,
13966 lpfc_sli4_sp_handle_mcqe,
13967 &delay, LPFC_QUEUE_WORK);
13968 break;
13969 case LPFC_WCQ:
13970 if (cq->subtype == LPFC_IO)
13971 workposted |= __lpfc_sli4_process_cq(phba, cq,
13972 lpfc_sli4_fp_handle_cqe,
13973 &delay, LPFC_QUEUE_WORK);
13974 else
13975 workposted |= __lpfc_sli4_process_cq(phba, cq,
13976 lpfc_sli4_sp_handle_cqe,
13977 &delay, LPFC_QUEUE_WORK);
13978 break;
13979 default:
13980 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13981 "0370 Invalid completion queue type (%d)\n",
13982 cq->type);
13983 return;
13984 }
13985
13986 if (delay) {
13987 if (is_kdump_kernel())
13988 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
13989 delay);
13990 else
13991 ret = queue_delayed_work_on(cq->chann, phba->wq,
13992 &cq->sched_spwork, delay);
13993 if (!ret)
13994 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13995 "0394 Cannot schedule queue work "
13996 "for cqid=%d on CPU %d\n",
13997 cq->queue_id, cq->chann);
13998 }
13999
14000 /* wake up worker thread if there are works to be done */
14001 if (workposted)
14002 lpfc_worker_wake_up(phba);
14003 }
14004
14005 /**
14006 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14007 * interrupt
14008 * @work: pointer to work element
14009 *
14010 * translates from the work handler and calls the slow-path handler.
14011 **/
14012 static void
lpfc_sli4_sp_process_cq(struct work_struct * work)14013 lpfc_sli4_sp_process_cq(struct work_struct *work)
14014 {
14015 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14016
14017 __lpfc_sli4_sp_process_cq(cq);
14018 }
14019
14020 /**
14021 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14022 * @work: pointer to work element
14023 *
14024 * translates from the work handler and calls the slow-path handler.
14025 **/
14026 static void
lpfc_sli4_dly_sp_process_cq(struct work_struct * work)14027 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14028 {
14029 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14030 struct lpfc_queue, sched_spwork);
14031
14032 __lpfc_sli4_sp_process_cq(cq);
14033 }
14034
14035 /**
14036 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14037 * @phba: Pointer to HBA context object.
14038 * @cq: Pointer to associated CQ
14039 * @wcqe: Pointer to work-queue completion queue entry.
14040 *
14041 * This routine process a fast-path work queue completion entry from fast-path
14042 * event queue for FCP command response completion.
14043 **/
14044 static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_wcqe_complete * wcqe)14045 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14046 struct lpfc_wcqe_complete *wcqe)
14047 {
14048 struct lpfc_sli_ring *pring = cq->pring;
14049 struct lpfc_iocbq *cmdiocbq;
14050 struct lpfc_iocbq irspiocbq;
14051 unsigned long iflags;
14052
14053 /* Check for response status */
14054 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14055 /* If resource errors reported from HBA, reduce queue
14056 * depth of the SCSI device.
14057 */
14058 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14059 IOSTAT_LOCAL_REJECT)) &&
14060 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14061 IOERR_NO_RESOURCES))
14062 phba->lpfc_rampdown_queue_depth(phba);
14063
14064 /* Log the cmpl status */
14065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14066 "0373 FCP CQE cmpl: status=x%x: "
14067 "CQE: %08x %08x %08x %08x\n",
14068 bf_get(lpfc_wcqe_c_status, wcqe),
14069 wcqe->word0, wcqe->total_data_placed,
14070 wcqe->parameter, wcqe->word3);
14071 }
14072
14073 /* Look up the FCP command IOCB and create pseudo response IOCB */
14074 spin_lock_irqsave(&pring->ring_lock, iflags);
14075 pring->stats.iocb_event++;
14076 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14077 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14078 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14079 if (unlikely(!cmdiocbq)) {
14080 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14081 "0374 FCP complete with no corresponding "
14082 "cmdiocb: iotag (%d)\n",
14083 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14084 return;
14085 }
14086 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14087 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14088 #endif
14089 if (cmdiocbq->iocb_cmpl == NULL) {
14090 if (cmdiocbq->wqe_cmpl) {
14091 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14092 spin_lock_irqsave(&phba->hbalock, iflags);
14093 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14094 spin_unlock_irqrestore(&phba->hbalock, iflags);
14095 }
14096
14097 /* Pass the cmd_iocb and the wcqe to the upper layer */
14098 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14099 return;
14100 }
14101 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14102 "0375 FCP cmdiocb not callback function "
14103 "iotag: (%d)\n",
14104 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14105 return;
14106 }
14107
14108 /* Fake the irspiocb and copy necessary response information */
14109 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14110
14111 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14112 spin_lock_irqsave(&phba->hbalock, iflags);
14113 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14114 spin_unlock_irqrestore(&phba->hbalock, iflags);
14115 }
14116
14117 /* Pass the cmd_iocb and the rsp state to the upper layer */
14118 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14119 }
14120
14121 /**
14122 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14123 * @phba: Pointer to HBA context object.
14124 * @cq: Pointer to completion queue.
14125 * @wcqe: Pointer to work-queue completion queue entry.
14126 *
14127 * This routine handles an fast-path WQ entry consumed event by invoking the
14128 * proper WQ release routine to the slow-path WQ.
14129 **/
14130 static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_wcqe_release * wcqe)14131 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14132 struct lpfc_wcqe_release *wcqe)
14133 {
14134 struct lpfc_queue *childwq;
14135 bool wqid_matched = false;
14136 uint16_t hba_wqid;
14137
14138 /* Check for fast-path FCP work queue release */
14139 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14140 list_for_each_entry(childwq, &cq->child_list, list) {
14141 if (childwq->queue_id == hba_wqid) {
14142 lpfc_sli4_wq_release(childwq,
14143 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14144 if (childwq->q_flag & HBA_NVMET_WQFULL)
14145 lpfc_nvmet_wqfull_process(phba, childwq);
14146 wqid_matched = true;
14147 break;
14148 }
14149 }
14150 /* Report warning log message if no match found */
14151 if (wqid_matched != true)
14152 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14153 "2580 Fast-path wqe consume event carries "
14154 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14155 }
14156
14157 /**
14158 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
14159 * @phba: Pointer to HBA context object.
14160 * @cq: Pointer to completion queue.
14161 * @rcqe: Pointer to receive-queue completion queue entry.
14162 *
14163 * This routine process a receive-queue completion queue entry.
14164 *
14165 * Return: true if work posted to worker thread, otherwise false.
14166 **/
14167 static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_rcqe * rcqe)14168 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14169 struct lpfc_rcqe *rcqe)
14170 {
14171 bool workposted = false;
14172 struct lpfc_queue *hrq;
14173 struct lpfc_queue *drq;
14174 struct rqb_dmabuf *dma_buf;
14175 struct fc_frame_header *fc_hdr;
14176 struct lpfc_nvmet_tgtport *tgtp;
14177 uint32_t status, rq_id;
14178 unsigned long iflags;
14179 uint32_t fctl, idx;
14180
14181 if ((phba->nvmet_support == 0) ||
14182 (phba->sli4_hba.nvmet_cqset == NULL))
14183 return workposted;
14184
14185 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14186 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14187 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14188
14189 /* sanity check on queue memory */
14190 if (unlikely(!hrq) || unlikely(!drq))
14191 return workposted;
14192
14193 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14194 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14195 else
14196 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14197
14198 if ((phba->nvmet_support == 0) ||
14199 (rq_id != hrq->queue_id))
14200 return workposted;
14201
14202 status = bf_get(lpfc_rcqe_status, rcqe);
14203 switch (status) {
14204 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14206 "6126 Receive Frame Truncated!!\n");
14207 fallthrough;
14208 case FC_STATUS_RQ_SUCCESS:
14209 spin_lock_irqsave(&phba->hbalock, iflags);
14210 lpfc_sli4_rq_release(hrq, drq);
14211 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14212 if (!dma_buf) {
14213 hrq->RQ_no_buf_found++;
14214 spin_unlock_irqrestore(&phba->hbalock, iflags);
14215 goto out;
14216 }
14217 spin_unlock_irqrestore(&phba->hbalock, iflags);
14218 hrq->RQ_rcv_buf++;
14219 hrq->RQ_buf_posted--;
14220 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14221
14222 /* Just some basic sanity checks on FCP Command frame */
14223 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14224 fc_hdr->fh_f_ctl[1] << 8 |
14225 fc_hdr->fh_f_ctl[2]);
14226 if (((fctl &
14227 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14228 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14229 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
14230 goto drop;
14231
14232 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14233 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14234 lpfc_nvmet_unsol_fcp_event(
14235 phba, idx, dma_buf, cq->isr_timestamp,
14236 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14237 return false;
14238 }
14239 drop:
14240 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14241 break;
14242 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14243 if (phba->nvmet_support) {
14244 tgtp = phba->targetport->private;
14245 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14246 "6401 RQE Error x%x, posted %d err_cnt "
14247 "%d: %x %x %x\n",
14248 status, hrq->RQ_buf_posted,
14249 hrq->RQ_no_posted_buf,
14250 atomic_read(&tgtp->rcv_fcp_cmd_in),
14251 atomic_read(&tgtp->rcv_fcp_cmd_out),
14252 atomic_read(&tgtp->xmt_fcp_release));
14253 }
14254 fallthrough;
14255
14256 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14257 hrq->RQ_no_posted_buf++;
14258 /* Post more buffers if possible */
14259 break;
14260 }
14261 out:
14262 return workposted;
14263 }
14264
14265 /**
14266 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
14267 * @phba: adapter with cq
14268 * @cq: Pointer to the completion queue.
14269 * @cqe: Pointer to fast-path completion queue entry.
14270 *
14271 * This routine process a fast-path work queue completion entry from fast-path
14272 * event queue for FCP command response completion.
14273 *
14274 * Return: true if work posted to worker thread, otherwise false.
14275 **/
14276 static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_cqe * cqe)14277 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14278 struct lpfc_cqe *cqe)
14279 {
14280 struct lpfc_wcqe_release wcqe;
14281 bool workposted = false;
14282
14283 /* Copy the work queue CQE and convert endian order if needed */
14284 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14285
14286 /* Check and process for different type of WCQE and dispatch */
14287 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14288 case CQE_CODE_COMPL_WQE:
14289 case CQE_CODE_NVME_ERSP:
14290 cq->CQ_wq++;
14291 /* Process the WQ complete event */
14292 phba->last_completion_time = jiffies;
14293 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14294 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14295 (struct lpfc_wcqe_complete *)&wcqe);
14296 break;
14297 case CQE_CODE_RELEASE_WQE:
14298 cq->CQ_release_wqe++;
14299 /* Process the WQ release event */
14300 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14301 (struct lpfc_wcqe_release *)&wcqe);
14302 break;
14303 case CQE_CODE_XRI_ABORTED:
14304 cq->CQ_xri_aborted++;
14305 /* Process the WQ XRI abort event */
14306 phba->last_completion_time = jiffies;
14307 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14308 (struct sli4_wcqe_xri_aborted *)&wcqe);
14309 break;
14310 case CQE_CODE_RECEIVE_V1:
14311 case CQE_CODE_RECEIVE:
14312 phba->last_completion_time = jiffies;
14313 if (cq->subtype == LPFC_NVMET) {
14314 workposted = lpfc_sli4_nvmet_handle_rcqe(
14315 phba, cq, (struct lpfc_rcqe *)&wcqe);
14316 }
14317 break;
14318 default:
14319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14320 "0144 Not a valid CQE code: x%x\n",
14321 bf_get(lpfc_wcqe_c_code, &wcqe));
14322 break;
14323 }
14324 return workposted;
14325 }
14326
14327 /**
14328 * lpfc_sli4_sched_cq_work - Schedules cq work
14329 * @phba: Pointer to HBA context object.
14330 * @cq: Pointer to CQ
14331 * @cqid: CQ ID
14332 *
14333 * This routine checks the poll mode of the CQ corresponding to
14334 * cq->chann, then either schedules a softirq or queue_work to complete
14335 * cq work.
14336 *
14337 * queue_work path is taken if in NVMET mode, or if poll_mode is in
14338 * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
14339 *
14340 **/
lpfc_sli4_sched_cq_work(struct lpfc_hba * phba,struct lpfc_queue * cq,uint16_t cqid)14341 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14342 struct lpfc_queue *cq, uint16_t cqid)
14343 {
14344 int ret = 0;
14345
14346 switch (cq->poll_mode) {
14347 case LPFC_IRQ_POLL:
14348 irq_poll_sched(&cq->iop);
14349 break;
14350 case LPFC_QUEUE_WORK:
14351 default:
14352 if (is_kdump_kernel())
14353 ret = queue_work(phba->wq, &cq->irqwork);
14354 else
14355 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14356 if (!ret)
14357 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14358 "0383 Cannot schedule queue work "
14359 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14360 cqid, cq->queue_id,
14361 raw_smp_processor_id());
14362 }
14363 }
14364
14365 /**
14366 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14367 * @phba: Pointer to HBA context object.
14368 * @eq: Pointer to the queue structure.
14369 * @eqe: Pointer to fast-path event queue entry.
14370 *
14371 * This routine process a event queue entry from the fast-path event queue.
14372 * It will check the MajorCode and MinorCode to determine this is for a
14373 * completion event on a completion queue, if not, an error shall be logged
14374 * and just return. Otherwise, it will get to the corresponding completion
14375 * queue and process all the entries on the completion queue, rearm the
14376 * completion queue, and then return.
14377 **/
14378 static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba * phba,struct lpfc_queue * eq,struct lpfc_eqe * eqe)14379 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14380 struct lpfc_eqe *eqe)
14381 {
14382 struct lpfc_queue *cq = NULL;
14383 uint32_t qidx = eq->hdwq;
14384 uint16_t cqid, id;
14385
14386 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14388 "0366 Not a valid completion "
14389 "event: majorcode=x%x, minorcode=x%x\n",
14390 bf_get_le32(lpfc_eqe_major_code, eqe),
14391 bf_get_le32(lpfc_eqe_minor_code, eqe));
14392 return;
14393 }
14394
14395 /* Get the reference to the corresponding CQ */
14396 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14397
14398 /* Use the fast lookup method first */
14399 if (cqid <= phba->sli4_hba.cq_max) {
14400 cq = phba->sli4_hba.cq_lookup[cqid];
14401 if (cq)
14402 goto work_cq;
14403 }
14404
14405 /* Next check for NVMET completion */
14406 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14407 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14408 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14409 /* Process NVMET unsol rcv */
14410 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14411 goto process_cq;
14412 }
14413 }
14414
14415 if (phba->sli4_hba.nvmels_cq &&
14416 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14417 /* Process NVME unsol rcv */
14418 cq = phba->sli4_hba.nvmels_cq;
14419 }
14420
14421 /* Otherwise this is a Slow path event */
14422 if (cq == NULL) {
14423 lpfc_sli4_sp_handle_eqe(phba, eqe,
14424 phba->sli4_hba.hdwq[qidx].hba_eq);
14425 return;
14426 }
14427
14428 process_cq:
14429 if (unlikely(cqid != cq->queue_id)) {
14430 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14431 "0368 Miss-matched fast-path completion "
14432 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14433 cqid, cq->queue_id);
14434 return;
14435 }
14436
14437 work_cq:
14438 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14439 if (phba->ktime_on)
14440 cq->isr_timestamp = ktime_get_ns();
14441 else
14442 cq->isr_timestamp = 0;
14443 #endif
14444 lpfc_sli4_sched_cq_work(phba, cq, cqid);
14445 }
14446
14447 /**
14448 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
14449 * @cq: Pointer to CQ to be processed
14450 * @poll_mode: Enum lpfc_poll_state to determine poll mode
14451 *
14452 * This routine calls the cq processing routine with the handler for
14453 * fast path CQEs.
14454 *
14455 * The CQ routine returns two values: the first is the calling status,
14456 * which indicates whether work was queued to the background discovery
14457 * thread. If true, the routine should wakeup the discovery thread;
14458 * the second is the delay parameter. If non-zero, rather than rearming
14459 * the CQ and yet another interrupt, the CQ handler should be queued so
14460 * that it is processed in a subsequent polling action. The value of
14461 * the delay indicates when to reschedule it.
14462 **/
14463 static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue * cq,enum lpfc_poll_mode poll_mode)14464 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14465 enum lpfc_poll_mode poll_mode)
14466 {
14467 struct lpfc_hba *phba = cq->phba;
14468 unsigned long delay;
14469 bool workposted = false;
14470 int ret = 0;
14471
14472 /* process and rearm the CQ */
14473 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14474 &delay, poll_mode);
14475
14476 if (delay) {
14477 if (is_kdump_kernel())
14478 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14479 delay);
14480 else
14481 ret = queue_delayed_work_on(cq->chann, phba->wq,
14482 &cq->sched_irqwork, delay);
14483 if (!ret)
14484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14485 "0367 Cannot schedule queue work "
14486 "for cqid=%d on CPU %d\n",
14487 cq->queue_id, cq->chann);
14488 }
14489
14490 /* wake up worker thread if there are works to be done */
14491 if (workposted)
14492 lpfc_worker_wake_up(phba);
14493 }
14494
14495 /**
14496 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14497 * interrupt
14498 * @work: pointer to work element
14499 *
14500 * translates from the work handler and calls the fast-path handler.
14501 **/
14502 static void
lpfc_sli4_hba_process_cq(struct work_struct * work)14503 lpfc_sli4_hba_process_cq(struct work_struct *work)
14504 {
14505 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14506
14507 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14508 }
14509
14510 /**
14511 * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer
14512 * @work: pointer to work element
14513 *
14514 * translates from the work handler and calls the fast-path handler.
14515 **/
14516 static void
lpfc_sli4_dly_hba_process_cq(struct work_struct * work)14517 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14518 {
14519 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14520 struct lpfc_queue, sched_irqwork);
14521
14522 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14523 }
14524
14525 /**
14526 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14527 * @irq: Interrupt number.
14528 * @dev_id: The device context pointer.
14529 *
14530 * This function is directly called from the PCI layer as an interrupt
14531 * service routine when device with SLI-4 interface spec is enabled with
14532 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14533 * ring event in the HBA. However, when the device is enabled with either
14534 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14535 * device-level interrupt handler. When the PCI slot is in error recovery
14536 * or the HBA is undergoing initialization, the interrupt handler will not
14537 * process the interrupt. The SCSI FCP fast-path ring event are handled in
14538 * the intrrupt context. This function is called without any lock held.
14539 * It gets the hbalock to access and update SLI data structures. Note that,
14540 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14541 * equal to that of FCP CQ index.
14542 *
14543 * The link attention and ELS ring attention events are handled
14544 * by the worker thread. The interrupt handler signals the worker thread
14545 * and returns for these events. This function is called without any lock
14546 * held. It gets the hbalock to access and update SLI data structures.
14547 *
14548 * This function returns IRQ_HANDLED when interrupt is handled else it
14549 * returns IRQ_NONE.
14550 **/
14551 irqreturn_t
lpfc_sli4_hba_intr_handler(int irq,void * dev_id)14552 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14553 {
14554 struct lpfc_hba *phba;
14555 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14556 struct lpfc_queue *fpeq;
14557 unsigned long iflag;
14558 int ecount = 0;
14559 int hba_eqidx;
14560 struct lpfc_eq_intr_info *eqi;
14561
14562 /* Get the driver's phba structure from the dev_id */
14563 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14564 phba = hba_eq_hdl->phba;
14565 hba_eqidx = hba_eq_hdl->idx;
14566
14567 if (unlikely(!phba))
14568 return IRQ_NONE;
14569 if (unlikely(!phba->sli4_hba.hdwq))
14570 return IRQ_NONE;
14571
14572 /* Get to the EQ struct associated with this vector */
14573 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14574 if (unlikely(!fpeq))
14575 return IRQ_NONE;
14576
14577 /* Check device state for handling interrupt */
14578 if (unlikely(lpfc_intr_state_check(phba))) {
14579 /* Check again for link_state with lock held */
14580 spin_lock_irqsave(&phba->hbalock, iflag);
14581 if (phba->link_state < LPFC_LINK_DOWN)
14582 /* Flush, clear interrupt, and rearm the EQ */
14583 lpfc_sli4_eqcq_flush(phba, fpeq);
14584 spin_unlock_irqrestore(&phba->hbalock, iflag);
14585 return IRQ_NONE;
14586 }
14587
14588 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14589 eqi->icnt++;
14590
14591 fpeq->last_cpu = raw_smp_processor_id();
14592
14593 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14594 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14595 phba->cfg_auto_imax &&
14596 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14597 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14598 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14599
14600 /* process and rearm the EQ */
14601 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14602
14603 if (unlikely(ecount == 0)) {
14604 fpeq->EQ_no_entry++;
14605 if (phba->intr_type == MSIX)
14606 /* MSI-X treated interrupt served as no EQ share INT */
14607 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14608 "0358 MSI-X interrupt with no EQE\n");
14609 else
14610 /* Non MSI-X treated on interrupt as EQ share INT */
14611 return IRQ_NONE;
14612 }
14613
14614 return IRQ_HANDLED;
14615 } /* lpfc_sli4_fp_intr_handler */
14616
14617 /**
14618 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
14619 * @irq: Interrupt number.
14620 * @dev_id: The device context pointer.
14621 *
14622 * This function is the device-level interrupt handler to device with SLI-4
14623 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
14624 * interrupt mode is enabled and there is an event in the HBA which requires
14625 * driver attention. This function invokes the slow-path interrupt attention
14626 * handling function and fast-path interrupt attention handling function in
14627 * turn to process the relevant HBA attention events. This function is called
14628 * without any lock held. It gets the hbalock to access and update SLI data
14629 * structures.
14630 *
14631 * This function returns IRQ_HANDLED when interrupt is handled, else it
14632 * returns IRQ_NONE.
14633 **/
14634 irqreturn_t
lpfc_sli4_intr_handler(int irq,void * dev_id)14635 lpfc_sli4_intr_handler(int irq, void *dev_id)
14636 {
14637 struct lpfc_hba *phba;
14638 irqreturn_t hba_irq_rc;
14639 bool hba_handled = false;
14640 int qidx;
14641
14642 /* Get the driver's phba structure from the dev_id */
14643 phba = (struct lpfc_hba *)dev_id;
14644
14645 if (unlikely(!phba))
14646 return IRQ_NONE;
14647
14648 /*
14649 * Invoke fast-path host attention interrupt handling as appropriate.
14650 */
14651 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14652 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14653 &phba->sli4_hba.hba_eq_hdl[qidx]);
14654 if (hba_irq_rc == IRQ_HANDLED)
14655 hba_handled |= true;
14656 }
14657
14658 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14659 } /* lpfc_sli4_intr_handler */
14660
lpfc_sli4_poll_hbtimer(struct timer_list * t)14661 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14662 {
14663 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14664 struct lpfc_queue *eq;
14665 int i = 0;
14666
14667 rcu_read_lock();
14668
14669 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14670 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14671 if (!list_empty(&phba->poll_list))
14672 mod_timer(&phba->cpuhp_poll_timer,
14673 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14674
14675 rcu_read_unlock();
14676 }
14677
lpfc_sli4_poll_eq(struct lpfc_queue * eq,uint8_t path)14678 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14679 {
14680 struct lpfc_hba *phba = eq->phba;
14681 int i = 0;
14682
14683 /*
14684 * Unlocking an irq is one of the entry point to check
14685 * for re-schedule, but we are good for io submission
14686 * path as midlayer does a get_cpu to glue us in. Flush
14687 * out the invalidate queue so we can see the updated
14688 * value for flag.
14689 */
14690 smp_rmb();
14691
14692 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14693 /* We will not likely get the completion for the caller
14694 * during this iteration but i guess that's fine.
14695 * Future io's coming on this eq should be able to
14696 * pick it up. As for the case of single io's, they
14697 * will be handled through a sched from polling timer
14698 * function which is currently triggered every 1msec.
14699 */
14700 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14701
14702 return i;
14703 }
14704
lpfc_sli4_add_to_poll_list(struct lpfc_queue * eq)14705 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14706 {
14707 struct lpfc_hba *phba = eq->phba;
14708
14709 /* kickstart slowpath processing if needed */
14710 if (list_empty(&phba->poll_list))
14711 mod_timer(&phba->cpuhp_poll_timer,
14712 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14713
14714 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14715 synchronize_rcu();
14716 }
14717
lpfc_sli4_remove_from_poll_list(struct lpfc_queue * eq)14718 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14719 {
14720 struct lpfc_hba *phba = eq->phba;
14721
14722 /* Disable slowpath processing for this eq. Kick start the eq
14723 * by RE-ARMING the eq's ASAP
14724 */
14725 list_del_rcu(&eq->_poll_list);
14726 synchronize_rcu();
14727
14728 if (list_empty(&phba->poll_list))
14729 del_timer_sync(&phba->cpuhp_poll_timer);
14730 }
14731
lpfc_sli4_cleanup_poll_list(struct lpfc_hba * phba)14732 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14733 {
14734 struct lpfc_queue *eq, *next;
14735
14736 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14737 list_del(&eq->_poll_list);
14738
14739 INIT_LIST_HEAD(&phba->poll_list);
14740 synchronize_rcu();
14741 }
14742
14743 static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue * eq,uint8_t mode)14744 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14745 {
14746 if (mode == eq->mode)
14747 return;
14748 /*
14749 * currently this function is only called during a hotplug
14750 * event and the cpu on which this function is executing
14751 * is going offline. By now the hotplug has instructed
14752 * the scheduler to remove this cpu from cpu active mask.
14753 * So we don't need to work about being put aside by the
14754 * scheduler for a high priority process. Yes, the inte-
14755 * rrupts could come but they are known to retire ASAP.
14756 */
14757
14758 /* Disable polling in the fastpath */
14759 WRITE_ONCE(eq->mode, mode);
14760 /* flush out the store buffer */
14761 smp_wmb();
14762
14763 /*
14764 * Add this eq to the polling list and start polling. For
14765 * a grace period both interrupt handler and poller will
14766 * try to process the eq _but_ that's fine. We have a
14767 * synchronization mechanism in place (queue_claimed) to
14768 * deal with it. This is just a draining phase for int-
14769 * errupt handler (not eq's) as we have guranteed through
14770 * barrier that all the CPUs have seen the new CQ_POLLED
14771 * state. which will effectively disable the REARMING of
14772 * the EQ. The whole idea is eq's die off eventually as
14773 * we are not rearming EQ's anymore.
14774 */
14775 mode ? lpfc_sli4_add_to_poll_list(eq) :
14776 lpfc_sli4_remove_from_poll_list(eq);
14777 }
14778
lpfc_sli4_start_polling(struct lpfc_queue * eq)14779 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14780 {
14781 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14782 }
14783
lpfc_sli4_stop_polling(struct lpfc_queue * eq)14784 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14785 {
14786 struct lpfc_hba *phba = eq->phba;
14787
14788 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14789
14790 /* Kick start for the pending io's in h/w.
14791 * Once we switch back to interrupt processing on a eq
14792 * the io path completion will only arm eq's when it
14793 * receives a completion. But since eq's are in disa-
14794 * rmed state it doesn't receive a completion. This
14795 * creates a deadlock scenaro.
14796 */
14797 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14798 }
14799
14800 /**
14801 * lpfc_sli4_queue_free - free a queue structure and associated memory
14802 * @queue: The queue structure to free.
14803 *
14804 * This function frees a queue structure and the DMAable memory used for
14805 * the host resident queue. This function must be called after destroying the
14806 * queue on the HBA.
14807 **/
14808 void
lpfc_sli4_queue_free(struct lpfc_queue * queue)14809 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14810 {
14811 struct lpfc_dmabuf *dmabuf;
14812
14813 if (!queue)
14814 return;
14815
14816 if (!list_empty(&queue->wq_list))
14817 list_del(&queue->wq_list);
14818
14819 while (!list_empty(&queue->page_list)) {
14820 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14821 list);
14822 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14823 dmabuf->virt, dmabuf->phys);
14824 kfree(dmabuf);
14825 }
14826 if (queue->rqbp) {
14827 lpfc_free_rq_buffer(queue->phba, queue);
14828 kfree(queue->rqbp);
14829 }
14830
14831 if (!list_empty(&queue->cpu_list))
14832 list_del(&queue->cpu_list);
14833
14834 kfree(queue);
14835 return;
14836 }
14837
14838 /**
14839 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14840 * @phba: The HBA that this queue is being created on.
14841 * @page_size: The size of a queue page
14842 * @entry_size: The size of each queue entry for this queue.
14843 * @entry_count: The number of entries that this queue will handle.
14844 * @cpu: The cpu that will primarily utilize this queue.
14845 *
14846 * This function allocates a queue structure and the DMAable memory used for
14847 * the host resident queue. This function must be called before creating the
14848 * queue on the HBA.
14849 **/
14850 struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba * phba,uint32_t page_size,uint32_t entry_size,uint32_t entry_count,int cpu)14851 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14852 uint32_t entry_size, uint32_t entry_count, int cpu)
14853 {
14854 struct lpfc_queue *queue;
14855 struct lpfc_dmabuf *dmabuf;
14856 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14857 uint16_t x, pgcnt;
14858
14859 if (!phba->sli4_hba.pc_sli4_params.supported)
14860 hw_page_size = page_size;
14861
14862 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14863
14864 /* If needed, Adjust page count to match the max the adapter supports */
14865 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14866 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14867
14868 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14869 GFP_KERNEL, cpu_to_node(cpu));
14870 if (!queue)
14871 return NULL;
14872
14873 INIT_LIST_HEAD(&queue->list);
14874 INIT_LIST_HEAD(&queue->_poll_list);
14875 INIT_LIST_HEAD(&queue->wq_list);
14876 INIT_LIST_HEAD(&queue->wqfull_list);
14877 INIT_LIST_HEAD(&queue->page_list);
14878 INIT_LIST_HEAD(&queue->child_list);
14879 INIT_LIST_HEAD(&queue->cpu_list);
14880
14881 /* Set queue parameters now. If the system cannot provide memory
14882 * resources, the free routine needs to know what was allocated.
14883 */
14884 queue->page_count = pgcnt;
14885 queue->q_pgs = (void **)&queue[1];
14886 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14887 queue->entry_size = entry_size;
14888 queue->entry_count = entry_count;
14889 queue->page_size = hw_page_size;
14890 queue->phba = phba;
14891
14892 for (x = 0; x < queue->page_count; x++) {
14893 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14894 dev_to_node(&phba->pcidev->dev));
14895 if (!dmabuf)
14896 goto out_fail;
14897 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14898 hw_page_size, &dmabuf->phys,
14899 GFP_KERNEL);
14900 if (!dmabuf->virt) {
14901 kfree(dmabuf);
14902 goto out_fail;
14903 }
14904 dmabuf->buffer_tag = x;
14905 list_add_tail(&dmabuf->list, &queue->page_list);
14906 /* use lpfc_sli4_qe to index a paritcular entry in this page */
14907 queue->q_pgs[x] = dmabuf->virt;
14908 }
14909 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14910 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14911 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14912 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14913
14914 /* notify_interval will be set during q creation */
14915
14916 return queue;
14917 out_fail:
14918 lpfc_sli4_queue_free(queue);
14919 return NULL;
14920 }
14921
14922 /**
14923 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14924 * @phba: HBA structure that indicates port to create a queue on.
14925 * @pci_barset: PCI BAR set flag.
14926 *
14927 * This function shall perform iomap of the specified PCI BAR address to host
14928 * memory address if not already done so and return it. The returned host
14929 * memory address can be NULL.
14930 */
14931 static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba * phba,uint16_t pci_barset)14932 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14933 {
14934 if (!phba->pcidev)
14935 return NULL;
14936
14937 switch (pci_barset) {
14938 case WQ_PCI_BAR_0_AND_1:
14939 return phba->pci_bar0_memmap_p;
14940 case WQ_PCI_BAR_2_AND_3:
14941 return phba->pci_bar2_memmap_p;
14942 case WQ_PCI_BAR_4_AND_5:
14943 return phba->pci_bar4_memmap_p;
14944 default:
14945 break;
14946 }
14947 return NULL;
14948 }
14949
14950 /**
14951 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14952 * @phba: HBA structure that EQs are on.
14953 * @startq: The starting EQ index to modify
14954 * @numq: The number of EQs (consecutive indexes) to modify
14955 * @usdelay: amount of delay
14956 *
14957 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14958 * is set either by writing to a register (if supported by the SLI Port)
14959 * or by mailbox command. The mailbox command allows several EQs to be
14960 * updated at once.
14961 *
14962 * The @phba struct is used to send a mailbox command to HBA. The @startq
14963 * is used to get the starting EQ index to change. The @numq value is
14964 * used to specify how many consecutive EQ indexes, starting at EQ index,
14965 * are to be changed. This function is asynchronous and will wait for any
14966 * mailbox commands to finish before returning.
14967 *
14968 * On success this function will return a zero. If unable to allocate
14969 * enough memory this function will return -ENOMEM. If a mailbox command
14970 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
14971 * have had their delay multipler changed.
14972 **/
14973 void
lpfc_modify_hba_eq_delay(struct lpfc_hba * phba,uint32_t startq,uint32_t numq,uint32_t usdelay)14974 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14975 uint32_t numq, uint32_t usdelay)
14976 {
14977 struct lpfc_mbx_modify_eq_delay *eq_delay;
14978 LPFC_MBOXQ_t *mbox;
14979 struct lpfc_queue *eq;
14980 int cnt = 0, rc, length;
14981 uint32_t shdr_status, shdr_add_status;
14982 uint32_t dmult;
14983 int qidx;
14984 union lpfc_sli4_cfg_shdr *shdr;
14985
14986 if (startq >= phba->cfg_irq_chann)
14987 return;
14988
14989 if (usdelay > 0xFFFF) {
14990 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14991 "6429 usdelay %d too large. Scaled down to "
14992 "0xFFFF.\n", usdelay);
14993 usdelay = 0xFFFF;
14994 }
14995
14996 /* set values by EQ_DELAY register if supported */
14997 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14998 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14999 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15000 if (!eq)
15001 continue;
15002
15003 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15004
15005 if (++cnt >= numq)
15006 break;
15007 }
15008 return;
15009 }
15010
15011 /* Otherwise, set values by mailbox cmd */
15012
15013 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15014 if (!mbox) {
15015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15016 "6428 Failed allocating mailbox cmd buffer."
15017 " EQ delay was not set.\n");
15018 return;
15019 }
15020 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15021 sizeof(struct lpfc_sli4_cfg_mhdr));
15022 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15023 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15024 length, LPFC_SLI4_MBX_EMBED);
15025 eq_delay = &mbox->u.mqe.un.eq_delay;
15026
15027 /* Calculate delay multiper from maximum interrupt per second */
15028 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15029 if (dmult)
15030 dmult--;
15031 if (dmult > LPFC_DMULT_MAX)
15032 dmult = LPFC_DMULT_MAX;
15033
15034 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15035 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15036 if (!eq)
15037 continue;
15038 eq->q_mode = usdelay;
15039 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15040 eq_delay->u.request.eq[cnt].phase = 0;
15041 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15042
15043 if (++cnt >= numq)
15044 break;
15045 }
15046 eq_delay->u.request.num_eq = cnt;
15047
15048 mbox->vport = phba->pport;
15049 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15050 mbox->ctx_buf = NULL;
15051 mbox->ctx_ndlp = NULL;
15052 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15053 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15054 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15055 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15056 if (shdr_status || shdr_add_status || rc) {
15057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15058 "2512 MODIFY_EQ_DELAY mailbox failed with "
15059 "status x%x add_status x%x, mbx status x%x\n",
15060 shdr_status, shdr_add_status, rc);
15061 }
15062 mempool_free(mbox, phba->mbox_mem_pool);
15063 return;
15064 }
15065
15066 /**
15067 * lpfc_eq_create - Create an Event Queue on the HBA
15068 * @phba: HBA structure that indicates port to create a queue on.
15069 * @eq: The queue structure to use to create the event queue.
15070 * @imax: The maximum interrupt per second limit.
15071 *
15072 * This function creates an event queue, as detailed in @eq, on a port,
15073 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15074 *
15075 * The @phba struct is used to send mailbox command to HBA. The @eq struct
15076 * is used to get the entry count and entry size that are necessary to
15077 * determine the number of pages to allocate and use for this queue. This
15078 * function will send the EQ_CREATE mailbox command to the HBA to setup the
15079 * event queue. This function is asynchronous and will wait for the mailbox
15080 * command to finish before continuing.
15081 *
15082 * On success this function will return a zero. If unable to allocate enough
15083 * memory this function will return -ENOMEM. If the queue create mailbox command
15084 * fails this function will return -ENXIO.
15085 **/
15086 int
lpfc_eq_create(struct lpfc_hba * phba,struct lpfc_queue * eq,uint32_t imax)15087 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15088 {
15089 struct lpfc_mbx_eq_create *eq_create;
15090 LPFC_MBOXQ_t *mbox;
15091 int rc, length, status = 0;
15092 struct lpfc_dmabuf *dmabuf;
15093 uint32_t shdr_status, shdr_add_status;
15094 union lpfc_sli4_cfg_shdr *shdr;
15095 uint16_t dmult;
15096 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15097
15098 /* sanity check on queue memory */
15099 if (!eq)
15100 return -ENODEV;
15101 if (!phba->sli4_hba.pc_sli4_params.supported)
15102 hw_page_size = SLI4_PAGE_SIZE;
15103
15104 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15105 if (!mbox)
15106 return -ENOMEM;
15107 length = (sizeof(struct lpfc_mbx_eq_create) -
15108 sizeof(struct lpfc_sli4_cfg_mhdr));
15109 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15110 LPFC_MBOX_OPCODE_EQ_CREATE,
15111 length, LPFC_SLI4_MBX_EMBED);
15112 eq_create = &mbox->u.mqe.un.eq_create;
15113 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15114 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15115 eq->page_count);
15116 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15117 LPFC_EQE_SIZE);
15118 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15119
15120 /* Use version 2 of CREATE_EQ if eqav is set */
15121 if (phba->sli4_hba.pc_sli4_params.eqav) {
15122 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15123 LPFC_Q_CREATE_VERSION_2);
15124 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15125 phba->sli4_hba.pc_sli4_params.eqav);
15126 }
15127
15128 /* don't setup delay multiplier using EQ_CREATE */
15129 dmult = 0;
15130 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15131 dmult);
15132 switch (eq->entry_count) {
15133 default:
15134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15135 "0360 Unsupported EQ count. (%d)\n",
15136 eq->entry_count);
15137 if (eq->entry_count < 256) {
15138 status = -EINVAL;
15139 goto out;
15140 }
15141 fallthrough; /* otherwise default to smallest count */
15142 case 256:
15143 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15144 LPFC_EQ_CNT_256);
15145 break;
15146 case 512:
15147 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15148 LPFC_EQ_CNT_512);
15149 break;
15150 case 1024:
15151 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15152 LPFC_EQ_CNT_1024);
15153 break;
15154 case 2048:
15155 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15156 LPFC_EQ_CNT_2048);
15157 break;
15158 case 4096:
15159 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15160 LPFC_EQ_CNT_4096);
15161 break;
15162 }
15163 list_for_each_entry(dmabuf, &eq->page_list, list) {
15164 memset(dmabuf->virt, 0, hw_page_size);
15165 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15166 putPaddrLow(dmabuf->phys);
15167 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15168 putPaddrHigh(dmabuf->phys);
15169 }
15170 mbox->vport = phba->pport;
15171 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15172 mbox->ctx_buf = NULL;
15173 mbox->ctx_ndlp = NULL;
15174 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15175 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15176 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15177 if (shdr_status || shdr_add_status || rc) {
15178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15179 "2500 EQ_CREATE mailbox failed with "
15180 "status x%x add_status x%x, mbx status x%x\n",
15181 shdr_status, shdr_add_status, rc);
15182 status = -ENXIO;
15183 }
15184 eq->type = LPFC_EQ;
15185 eq->subtype = LPFC_NONE;
15186 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15187 if (eq->queue_id == 0xFFFF)
15188 status = -ENXIO;
15189 eq->host_index = 0;
15190 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15191 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15192 out:
15193 mempool_free(mbox, phba->mbox_mem_pool);
15194 return status;
15195 }
15196
lpfc_cq_poll_hdler(struct irq_poll * iop,int budget)15197 static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15198 {
15199 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15200
15201 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15202
15203 return 1;
15204 }
15205
15206 /**
15207 * lpfc_cq_create - Create a Completion Queue on the HBA
15208 * @phba: HBA structure that indicates port to create a queue on.
15209 * @cq: The queue structure to use to create the completion queue.
15210 * @eq: The event queue to bind this completion queue to.
15211 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15212 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15213 *
15214 * This function creates a completion queue, as detailed in @wq, on a port,
15215 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
15216 *
15217 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15218 * is used to get the entry count and entry size that are necessary to
15219 * determine the number of pages to allocate and use for this queue. The @eq
15220 * is used to indicate which event queue to bind this completion queue to. This
15221 * function will send the CQ_CREATE mailbox command to the HBA to setup the
15222 * completion queue. This function is asynchronous and will wait for the mailbox
15223 * command to finish before continuing.
15224 *
15225 * On success this function will return a zero. If unable to allocate enough
15226 * memory this function will return -ENOMEM. If the queue create mailbox command
15227 * fails this function will return -ENXIO.
15228 **/
15229 int
lpfc_cq_create(struct lpfc_hba * phba,struct lpfc_queue * cq,struct lpfc_queue * eq,uint32_t type,uint32_t subtype)15230 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15231 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15232 {
15233 struct lpfc_mbx_cq_create *cq_create;
15234 struct lpfc_dmabuf *dmabuf;
15235 LPFC_MBOXQ_t *mbox;
15236 int rc, length, status = 0;
15237 uint32_t shdr_status, shdr_add_status;
15238 union lpfc_sli4_cfg_shdr *shdr;
15239
15240 /* sanity check on queue memory */
15241 if (!cq || !eq)
15242 return -ENODEV;
15243
15244 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15245 if (!mbox)
15246 return -ENOMEM;
15247 length = (sizeof(struct lpfc_mbx_cq_create) -
15248 sizeof(struct lpfc_sli4_cfg_mhdr));
15249 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15250 LPFC_MBOX_OPCODE_CQ_CREATE,
15251 length, LPFC_SLI4_MBX_EMBED);
15252 cq_create = &mbox->u.mqe.un.cq_create;
15253 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15254 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15255 cq->page_count);
15256 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15257 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15258 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15259 phba->sli4_hba.pc_sli4_params.cqv);
15260 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15261 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15262 (cq->page_size / SLI4_PAGE_SIZE));
15263 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15264 eq->queue_id);
15265 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15266 phba->sli4_hba.pc_sli4_params.cqav);
15267 } else {
15268 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15269 eq->queue_id);
15270 }
15271 switch (cq->entry_count) {
15272 case 2048:
15273 case 4096:
15274 if (phba->sli4_hba.pc_sli4_params.cqv ==
15275 LPFC_Q_CREATE_VERSION_2) {
15276 cq_create->u.request.context.lpfc_cq_context_count =
15277 cq->entry_count;
15278 bf_set(lpfc_cq_context_count,
15279 &cq_create->u.request.context,
15280 LPFC_CQ_CNT_WORD7);
15281 break;
15282 }
15283 fallthrough;
15284 default:
15285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15286 "0361 Unsupported CQ count: "
15287 "entry cnt %d sz %d pg cnt %d\n",
15288 cq->entry_count, cq->entry_size,
15289 cq->page_count);
15290 if (cq->entry_count < 256) {
15291 status = -EINVAL;
15292 goto out;
15293 }
15294 fallthrough; /* otherwise default to smallest count */
15295 case 256:
15296 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15297 LPFC_CQ_CNT_256);
15298 break;
15299 case 512:
15300 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15301 LPFC_CQ_CNT_512);
15302 break;
15303 case 1024:
15304 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15305 LPFC_CQ_CNT_1024);
15306 break;
15307 }
15308 list_for_each_entry(dmabuf, &cq->page_list, list) {
15309 memset(dmabuf->virt, 0, cq->page_size);
15310 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15311 putPaddrLow(dmabuf->phys);
15312 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15313 putPaddrHigh(dmabuf->phys);
15314 }
15315 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15316
15317 /* The IOCTL status is embedded in the mailbox subheader. */
15318 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15319 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15320 if (shdr_status || shdr_add_status || rc) {
15321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15322 "2501 CQ_CREATE mailbox failed with "
15323 "status x%x add_status x%x, mbx status x%x\n",
15324 shdr_status, shdr_add_status, rc);
15325 status = -ENXIO;
15326 goto out;
15327 }
15328 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15329 if (cq->queue_id == 0xFFFF) {
15330 status = -ENXIO;
15331 goto out;
15332 }
15333 /* link the cq onto the parent eq child list */
15334 list_add_tail(&cq->list, &eq->child_list);
15335 /* Set up completion queue's type and subtype */
15336 cq->type = type;
15337 cq->subtype = subtype;
15338 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15339 cq->assoc_qid = eq->queue_id;
15340 cq->assoc_qp = eq;
15341 cq->host_index = 0;
15342 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15343 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15344
15345 if (cq->queue_id > phba->sli4_hba.cq_max)
15346 phba->sli4_hba.cq_max = cq->queue_id;
15347
15348 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
15349 out:
15350 mempool_free(mbox, phba->mbox_mem_pool);
15351 return status;
15352 }
15353
15354 /**
15355 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
15356 * @phba: HBA structure that indicates port to create a queue on.
15357 * @cqp: The queue structure array to use to create the completion queues.
15358 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
15359 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
15360 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15361 *
15362 * This function creates a set of completion queue, s to support MRQ
15363 * as detailed in @cqp, on a port,
15364 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
15365 *
15366 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15367 * is used to get the entry count and entry size that are necessary to
15368 * determine the number of pages to allocate and use for this queue. The @eq
15369 * is used to indicate which event queue to bind this completion queue to. This
15370 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
15371 * completion queue. This function is asynchronous and will wait for the mailbox
15372 * command to finish before continuing.
15373 *
15374 * On success this function will return a zero. If unable to allocate enough
15375 * memory this function will return -ENOMEM. If the queue create mailbox command
15376 * fails this function will return -ENXIO.
15377 **/
15378 int
lpfc_cq_create_set(struct lpfc_hba * phba,struct lpfc_queue ** cqp,struct lpfc_sli4_hdw_queue * hdwq,uint32_t type,uint32_t subtype)15379 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15380 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15381 uint32_t subtype)
15382 {
15383 struct lpfc_queue *cq;
15384 struct lpfc_queue *eq;
15385 struct lpfc_mbx_cq_create_set *cq_set;
15386 struct lpfc_dmabuf *dmabuf;
15387 LPFC_MBOXQ_t *mbox;
15388 int rc, length, alloclen, status = 0;
15389 int cnt, idx, numcq, page_idx = 0;
15390 uint32_t shdr_status, shdr_add_status;
15391 union lpfc_sli4_cfg_shdr *shdr;
15392 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15393
15394 /* sanity check on queue memory */
15395 numcq = phba->cfg_nvmet_mrq;
15396 if (!cqp || !hdwq || !numcq)
15397 return -ENODEV;
15398
15399 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15400 if (!mbox)
15401 return -ENOMEM;
15402
15403 length = sizeof(struct lpfc_mbx_cq_create_set);
15404 length += ((numcq * cqp[0]->page_count) *
15405 sizeof(struct dma_address));
15406 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15407 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15408 LPFC_SLI4_MBX_NEMBED);
15409 if (alloclen < length) {
15410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15411 "3098 Allocated DMA memory size (%d) is "
15412 "less than the requested DMA memory size "
15413 "(%d)\n", alloclen, length);
15414 status = -ENOMEM;
15415 goto out;
15416 }
15417 cq_set = mbox->sge_array->addr[0];
15418 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15419 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15420
15421 for (idx = 0; idx < numcq; idx++) {
15422 cq = cqp[idx];
15423 eq = hdwq[idx].hba_eq;
15424 if (!cq || !eq) {
15425 status = -ENOMEM;
15426 goto out;
15427 }
15428 if (!phba->sli4_hba.pc_sli4_params.supported)
15429 hw_page_size = cq->page_size;
15430
15431 switch (idx) {
15432 case 0:
15433 bf_set(lpfc_mbx_cq_create_set_page_size,
15434 &cq_set->u.request,
15435 (hw_page_size / SLI4_PAGE_SIZE));
15436 bf_set(lpfc_mbx_cq_create_set_num_pages,
15437 &cq_set->u.request, cq->page_count);
15438 bf_set(lpfc_mbx_cq_create_set_evt,
15439 &cq_set->u.request, 1);
15440 bf_set(lpfc_mbx_cq_create_set_valid,
15441 &cq_set->u.request, 1);
15442 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15443 &cq_set->u.request, 0);
15444 bf_set(lpfc_mbx_cq_create_set_num_cq,
15445 &cq_set->u.request, numcq);
15446 bf_set(lpfc_mbx_cq_create_set_autovalid,
15447 &cq_set->u.request,
15448 phba->sli4_hba.pc_sli4_params.cqav);
15449 switch (cq->entry_count) {
15450 case 2048:
15451 case 4096:
15452 if (phba->sli4_hba.pc_sli4_params.cqv ==
15453 LPFC_Q_CREATE_VERSION_2) {
15454 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15455 &cq_set->u.request,
15456 cq->entry_count);
15457 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15458 &cq_set->u.request,
15459 LPFC_CQ_CNT_WORD7);
15460 break;
15461 }
15462 fallthrough;
15463 default:
15464 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15465 "3118 Bad CQ count. (%d)\n",
15466 cq->entry_count);
15467 if (cq->entry_count < 256) {
15468 status = -EINVAL;
15469 goto out;
15470 }
15471 fallthrough; /* otherwise default to smallest */
15472 case 256:
15473 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15474 &cq_set->u.request, LPFC_CQ_CNT_256);
15475 break;
15476 case 512:
15477 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15478 &cq_set->u.request, LPFC_CQ_CNT_512);
15479 break;
15480 case 1024:
15481 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15482 &cq_set->u.request, LPFC_CQ_CNT_1024);
15483 break;
15484 }
15485 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15486 &cq_set->u.request, eq->queue_id);
15487 break;
15488 case 1:
15489 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15490 &cq_set->u.request, eq->queue_id);
15491 break;
15492 case 2:
15493 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15494 &cq_set->u.request, eq->queue_id);
15495 break;
15496 case 3:
15497 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15498 &cq_set->u.request, eq->queue_id);
15499 break;
15500 case 4:
15501 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15502 &cq_set->u.request, eq->queue_id);
15503 break;
15504 case 5:
15505 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15506 &cq_set->u.request, eq->queue_id);
15507 break;
15508 case 6:
15509 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15510 &cq_set->u.request, eq->queue_id);
15511 break;
15512 case 7:
15513 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15514 &cq_set->u.request, eq->queue_id);
15515 break;
15516 case 8:
15517 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15518 &cq_set->u.request, eq->queue_id);
15519 break;
15520 case 9:
15521 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15522 &cq_set->u.request, eq->queue_id);
15523 break;
15524 case 10:
15525 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15526 &cq_set->u.request, eq->queue_id);
15527 break;
15528 case 11:
15529 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15530 &cq_set->u.request, eq->queue_id);
15531 break;
15532 case 12:
15533 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15534 &cq_set->u.request, eq->queue_id);
15535 break;
15536 case 13:
15537 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15538 &cq_set->u.request, eq->queue_id);
15539 break;
15540 case 14:
15541 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15542 &cq_set->u.request, eq->queue_id);
15543 break;
15544 case 15:
15545 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15546 &cq_set->u.request, eq->queue_id);
15547 break;
15548 }
15549
15550 /* link the cq onto the parent eq child list */
15551 list_add_tail(&cq->list, &eq->child_list);
15552 /* Set up completion queue's type and subtype */
15553 cq->type = type;
15554 cq->subtype = subtype;
15555 cq->assoc_qid = eq->queue_id;
15556 cq->assoc_qp = eq;
15557 cq->host_index = 0;
15558 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15559 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15560 cq->entry_count);
15561 cq->chann = idx;
15562
15563 rc = 0;
15564 list_for_each_entry(dmabuf, &cq->page_list, list) {
15565 memset(dmabuf->virt, 0, hw_page_size);
15566 cnt = page_idx + dmabuf->buffer_tag;
15567 cq_set->u.request.page[cnt].addr_lo =
15568 putPaddrLow(dmabuf->phys);
15569 cq_set->u.request.page[cnt].addr_hi =
15570 putPaddrHigh(dmabuf->phys);
15571 rc++;
15572 }
15573 page_idx += rc;
15574 }
15575
15576 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15577
15578 /* The IOCTL status is embedded in the mailbox subheader. */
15579 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15580 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15581 if (shdr_status || shdr_add_status || rc) {
15582 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15583 "3119 CQ_CREATE_SET mailbox failed with "
15584 "status x%x add_status x%x, mbx status x%x\n",
15585 shdr_status, shdr_add_status, rc);
15586 status = -ENXIO;
15587 goto out;
15588 }
15589 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15590 if (rc == 0xFFFF) {
15591 status = -ENXIO;
15592 goto out;
15593 }
15594
15595 for (idx = 0; idx < numcq; idx++) {
15596 cq = cqp[idx];
15597 cq->queue_id = rc + idx;
15598 if (cq->queue_id > phba->sli4_hba.cq_max)
15599 phba->sli4_hba.cq_max = cq->queue_id;
15600 }
15601
15602 out:
15603 lpfc_sli4_mbox_cmd_free(phba, mbox);
15604 return status;
15605 }
15606
15607 /**
15608 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15609 * @phba: HBA structure that indicates port to create a queue on.
15610 * @mq: The queue structure to use to create the mailbox queue.
15611 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15612 * @cq: The completion queue to associate with this cq.
15613 *
15614 * This function provides failback (fb) functionality when the
15615 * mq_create_ext fails on older FW generations. It's purpose is identical
15616 * to mq_create_ext otherwise.
15617 *
15618 * This routine cannot fail as all attributes were previously accessed and
15619 * initialized in mq_create_ext.
15620 **/
15621 static void
lpfc_mq_create_fb_init(struct lpfc_hba * phba,struct lpfc_queue * mq,LPFC_MBOXQ_t * mbox,struct lpfc_queue * cq)15622 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15623 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15624 {
15625 struct lpfc_mbx_mq_create *mq_create;
15626 struct lpfc_dmabuf *dmabuf;
15627 int length;
15628
15629 length = (sizeof(struct lpfc_mbx_mq_create) -
15630 sizeof(struct lpfc_sli4_cfg_mhdr));
15631 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15632 LPFC_MBOX_OPCODE_MQ_CREATE,
15633 length, LPFC_SLI4_MBX_EMBED);
15634 mq_create = &mbox->u.mqe.un.mq_create;
15635 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15636 mq->page_count);
15637 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15638 cq->queue_id);
15639 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15640 switch (mq->entry_count) {
15641 case 16:
15642 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15643 LPFC_MQ_RING_SIZE_16);
15644 break;
15645 case 32:
15646 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15647 LPFC_MQ_RING_SIZE_32);
15648 break;
15649 case 64:
15650 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15651 LPFC_MQ_RING_SIZE_64);
15652 break;
15653 case 128:
15654 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15655 LPFC_MQ_RING_SIZE_128);
15656 break;
15657 }
15658 list_for_each_entry(dmabuf, &mq->page_list, list) {
15659 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15660 putPaddrLow(dmabuf->phys);
15661 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15662 putPaddrHigh(dmabuf->phys);
15663 }
15664 }
15665
15666 /**
15667 * lpfc_mq_create - Create a mailbox Queue on the HBA
15668 * @phba: HBA structure that indicates port to create a queue on.
15669 * @mq: The queue structure to use to create the mailbox queue.
15670 * @cq: The completion queue to associate with this cq.
15671 * @subtype: The queue's subtype.
15672 *
15673 * This function creates a mailbox queue, as detailed in @mq, on a port,
15674 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
15675 *
15676 * The @phba struct is used to send mailbox command to HBA. The @cq struct
15677 * is used to get the entry count and entry size that are necessary to
15678 * determine the number of pages to allocate and use for this queue. This
15679 * function will send the MQ_CREATE mailbox command to the HBA to setup the
15680 * mailbox queue. This function is asynchronous and will wait for the mailbox
15681 * command to finish before continuing.
15682 *
15683 * On success this function will return a zero. If unable to allocate enough
15684 * memory this function will return -ENOMEM. If the queue create mailbox command
15685 * fails this function will return -ENXIO.
15686 **/
15687 int32_t
lpfc_mq_create(struct lpfc_hba * phba,struct lpfc_queue * mq,struct lpfc_queue * cq,uint32_t subtype)15688 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15689 struct lpfc_queue *cq, uint32_t subtype)
15690 {
15691 struct lpfc_mbx_mq_create *mq_create;
15692 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15693 struct lpfc_dmabuf *dmabuf;
15694 LPFC_MBOXQ_t *mbox;
15695 int rc, length, status = 0;
15696 uint32_t shdr_status, shdr_add_status;
15697 union lpfc_sli4_cfg_shdr *shdr;
15698 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15699
15700 /* sanity check on queue memory */
15701 if (!mq || !cq)
15702 return -ENODEV;
15703 if (!phba->sli4_hba.pc_sli4_params.supported)
15704 hw_page_size = SLI4_PAGE_SIZE;
15705
15706 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15707 if (!mbox)
15708 return -ENOMEM;
15709 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15710 sizeof(struct lpfc_sli4_cfg_mhdr));
15711 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15712 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15713 length, LPFC_SLI4_MBX_EMBED);
15714
15715 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15716 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15717 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15718 &mq_create_ext->u.request, mq->page_count);
15719 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15720 &mq_create_ext->u.request, 1);
15721 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15722 &mq_create_ext->u.request, 1);
15723 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15724 &mq_create_ext->u.request, 1);
15725 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15726 &mq_create_ext->u.request, 1);
15727 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15728 &mq_create_ext->u.request, 1);
15729 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15730 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15731 phba->sli4_hba.pc_sli4_params.mqv);
15732 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15733 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15734 cq->queue_id);
15735 else
15736 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15737 cq->queue_id);
15738 switch (mq->entry_count) {
15739 default:
15740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15741 "0362 Unsupported MQ count. (%d)\n",
15742 mq->entry_count);
15743 if (mq->entry_count < 16) {
15744 status = -EINVAL;
15745 goto out;
15746 }
15747 fallthrough; /* otherwise default to smallest count */
15748 case 16:
15749 bf_set(lpfc_mq_context_ring_size,
15750 &mq_create_ext->u.request.context,
15751 LPFC_MQ_RING_SIZE_16);
15752 break;
15753 case 32:
15754 bf_set(lpfc_mq_context_ring_size,
15755 &mq_create_ext->u.request.context,
15756 LPFC_MQ_RING_SIZE_32);
15757 break;
15758 case 64:
15759 bf_set(lpfc_mq_context_ring_size,
15760 &mq_create_ext->u.request.context,
15761 LPFC_MQ_RING_SIZE_64);
15762 break;
15763 case 128:
15764 bf_set(lpfc_mq_context_ring_size,
15765 &mq_create_ext->u.request.context,
15766 LPFC_MQ_RING_SIZE_128);
15767 break;
15768 }
15769 list_for_each_entry(dmabuf, &mq->page_list, list) {
15770 memset(dmabuf->virt, 0, hw_page_size);
15771 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15772 putPaddrLow(dmabuf->phys);
15773 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15774 putPaddrHigh(dmabuf->phys);
15775 }
15776 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15777 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15778 &mq_create_ext->u.response);
15779 if (rc != MBX_SUCCESS) {
15780 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15781 "2795 MQ_CREATE_EXT failed with "
15782 "status x%x. Failback to MQ_CREATE.\n",
15783 rc);
15784 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15785 mq_create = &mbox->u.mqe.un.mq_create;
15786 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15787 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15788 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15789 &mq_create->u.response);
15790 }
15791
15792 /* The IOCTL status is embedded in the mailbox subheader. */
15793 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15794 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15795 if (shdr_status || shdr_add_status || rc) {
15796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15797 "2502 MQ_CREATE mailbox failed with "
15798 "status x%x add_status x%x, mbx status x%x\n",
15799 shdr_status, shdr_add_status, rc);
15800 status = -ENXIO;
15801 goto out;
15802 }
15803 if (mq->queue_id == 0xFFFF) {
15804 status = -ENXIO;
15805 goto out;
15806 }
15807 mq->type = LPFC_MQ;
15808 mq->assoc_qid = cq->queue_id;
15809 mq->subtype = subtype;
15810 mq->host_index = 0;
15811 mq->hba_index = 0;
15812
15813 /* link the mq onto the parent cq child list */
15814 list_add_tail(&mq->list, &cq->child_list);
15815 out:
15816 mempool_free(mbox, phba->mbox_mem_pool);
15817 return status;
15818 }
15819
15820 /**
15821 * lpfc_wq_create - Create a Work Queue on the HBA
15822 * @phba: HBA structure that indicates port to create a queue on.
15823 * @wq: The queue structure to use to create the work queue.
15824 * @cq: The completion queue to bind this work queue to.
15825 * @subtype: The subtype of the work queue indicating its functionality.
15826 *
15827 * This function creates a work queue, as detailed in @wq, on a port, described
15828 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
15829 *
15830 * The @phba struct is used to send mailbox command to HBA. The @wq struct
15831 * is used to get the entry count and entry size that are necessary to
15832 * determine the number of pages to allocate and use for this queue. The @cq
15833 * is used to indicate which completion queue to bind this work queue to. This
15834 * function will send the WQ_CREATE mailbox command to the HBA to setup the
15835 * work queue. This function is asynchronous and will wait for the mailbox
15836 * command to finish before continuing.
15837 *
15838 * On success this function will return a zero. If unable to allocate enough
15839 * memory this function will return -ENOMEM. If the queue create mailbox command
15840 * fails this function will return -ENXIO.
15841 **/
15842 int
lpfc_wq_create(struct lpfc_hba * phba,struct lpfc_queue * wq,struct lpfc_queue * cq,uint32_t subtype)15843 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15844 struct lpfc_queue *cq, uint32_t subtype)
15845 {
15846 struct lpfc_mbx_wq_create *wq_create;
15847 struct lpfc_dmabuf *dmabuf;
15848 LPFC_MBOXQ_t *mbox;
15849 int rc, length, status = 0;
15850 uint32_t shdr_status, shdr_add_status;
15851 union lpfc_sli4_cfg_shdr *shdr;
15852 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15853 struct dma_address *page;
15854 void __iomem *bar_memmap_p;
15855 uint32_t db_offset;
15856 uint16_t pci_barset;
15857 uint8_t dpp_barset;
15858 uint32_t dpp_offset;
15859 uint8_t wq_create_version;
15860 #ifdef CONFIG_X86
15861 unsigned long pg_addr;
15862 #endif
15863
15864 /* sanity check on queue memory */
15865 if (!wq || !cq)
15866 return -ENODEV;
15867 if (!phba->sli4_hba.pc_sli4_params.supported)
15868 hw_page_size = wq->page_size;
15869
15870 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15871 if (!mbox)
15872 return -ENOMEM;
15873 length = (sizeof(struct lpfc_mbx_wq_create) -
15874 sizeof(struct lpfc_sli4_cfg_mhdr));
15875 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15876 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15877 length, LPFC_SLI4_MBX_EMBED);
15878 wq_create = &mbox->u.mqe.un.wq_create;
15879 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15880 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15881 wq->page_count);
15882 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15883 cq->queue_id);
15884
15885 /* wqv is the earliest version supported, NOT the latest */
15886 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15887 phba->sli4_hba.pc_sli4_params.wqv);
15888
15889 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15890 (wq->page_size > SLI4_PAGE_SIZE))
15891 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15892 else
15893 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15894
15895
15896 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15897 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15898 else
15899 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15900
15901 switch (wq_create_version) {
15902 case LPFC_Q_CREATE_VERSION_1:
15903 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15904 wq->entry_count);
15905 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15906 LPFC_Q_CREATE_VERSION_1);
15907
15908 switch (wq->entry_size) {
15909 default:
15910 case 64:
15911 bf_set(lpfc_mbx_wq_create_wqe_size,
15912 &wq_create->u.request_1,
15913 LPFC_WQ_WQE_SIZE_64);
15914 break;
15915 case 128:
15916 bf_set(lpfc_mbx_wq_create_wqe_size,
15917 &wq_create->u.request_1,
15918 LPFC_WQ_WQE_SIZE_128);
15919 break;
15920 }
15921 /* Request DPP by default */
15922 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15923 bf_set(lpfc_mbx_wq_create_page_size,
15924 &wq_create->u.request_1,
15925 (wq->page_size / SLI4_PAGE_SIZE));
15926 page = wq_create->u.request_1.page;
15927 break;
15928 default:
15929 page = wq_create->u.request.page;
15930 break;
15931 }
15932
15933 list_for_each_entry(dmabuf, &wq->page_list, list) {
15934 memset(dmabuf->virt, 0, hw_page_size);
15935 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15936 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15937 }
15938
15939 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15940 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15941
15942 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15943 /* The IOCTL status is embedded in the mailbox subheader. */
15944 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15945 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15946 if (shdr_status || shdr_add_status || rc) {
15947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15948 "2503 WQ_CREATE mailbox failed with "
15949 "status x%x add_status x%x, mbx status x%x\n",
15950 shdr_status, shdr_add_status, rc);
15951 status = -ENXIO;
15952 goto out;
15953 }
15954
15955 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15956 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15957 &wq_create->u.response);
15958 else
15959 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15960 &wq_create->u.response_1);
15961
15962 if (wq->queue_id == 0xFFFF) {
15963 status = -ENXIO;
15964 goto out;
15965 }
15966
15967 wq->db_format = LPFC_DB_LIST_FORMAT;
15968 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15969 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15970 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15971 &wq_create->u.response);
15972 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15973 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15975 "3265 WQ[%d] doorbell format "
15976 "not supported: x%x\n",
15977 wq->queue_id, wq->db_format);
15978 status = -EINVAL;
15979 goto out;
15980 }
15981 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15982 &wq_create->u.response);
15983 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15984 pci_barset);
15985 if (!bar_memmap_p) {
15986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15987 "3263 WQ[%d] failed to memmap "
15988 "pci barset:x%x\n",
15989 wq->queue_id, pci_barset);
15990 status = -ENOMEM;
15991 goto out;
15992 }
15993 db_offset = wq_create->u.response.doorbell_offset;
15994 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15995 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15997 "3252 WQ[%d] doorbell offset "
15998 "not supported: x%x\n",
15999 wq->queue_id, db_offset);
16000 status = -EINVAL;
16001 goto out;
16002 }
16003 wq->db_regaddr = bar_memmap_p + db_offset;
16004 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16005 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16006 "format:x%x\n", wq->queue_id,
16007 pci_barset, db_offset, wq->db_format);
16008 } else
16009 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16010 } else {
16011 /* Check if DPP was honored by the firmware */
16012 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16013 &wq_create->u.response_1);
16014 if (wq->dpp_enable) {
16015 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16016 &wq_create->u.response_1);
16017 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16018 pci_barset);
16019 if (!bar_memmap_p) {
16020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16021 "3267 WQ[%d] failed to memmap "
16022 "pci barset:x%x\n",
16023 wq->queue_id, pci_barset);
16024 status = -ENOMEM;
16025 goto out;
16026 }
16027 db_offset = wq_create->u.response_1.doorbell_offset;
16028 wq->db_regaddr = bar_memmap_p + db_offset;
16029 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16030 &wq_create->u.response_1);
16031 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16032 &wq_create->u.response_1);
16033 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16034 dpp_barset);
16035 if (!bar_memmap_p) {
16036 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16037 "3268 WQ[%d] failed to memmap "
16038 "pci barset:x%x\n",
16039 wq->queue_id, dpp_barset);
16040 status = -ENOMEM;
16041 goto out;
16042 }
16043 dpp_offset = wq_create->u.response_1.dpp_offset;
16044 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16046 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16047 "dpp_id:x%x dpp_barset:x%x "
16048 "dpp_offset:x%x\n",
16049 wq->queue_id, pci_barset, db_offset,
16050 wq->dpp_id, dpp_barset, dpp_offset);
16051
16052 #ifdef CONFIG_X86
16053 /* Enable combined writes for DPP aperture */
16054 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16055 rc = set_memory_wc(pg_addr, 1);
16056 if (rc) {
16057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16058 "3272 Cannot setup Combined "
16059 "Write on WQ[%d] - disable DPP\n",
16060 wq->queue_id);
16061 phba->cfg_enable_dpp = 0;
16062 }
16063 #else
16064 phba->cfg_enable_dpp = 0;
16065 #endif
16066 } else
16067 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16068 }
16069 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16070 if (wq->pring == NULL) {
16071 status = -ENOMEM;
16072 goto out;
16073 }
16074 wq->type = LPFC_WQ;
16075 wq->assoc_qid = cq->queue_id;
16076 wq->subtype = subtype;
16077 wq->host_index = 0;
16078 wq->hba_index = 0;
16079 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16080
16081 /* link the wq onto the parent cq child list */
16082 list_add_tail(&wq->list, &cq->child_list);
16083 out:
16084 mempool_free(mbox, phba->mbox_mem_pool);
16085 return status;
16086 }
16087
16088 /**
16089 * lpfc_rq_create - Create a Receive Queue on the HBA
16090 * @phba: HBA structure that indicates port to create a queue on.
16091 * @hrq: The queue structure to use to create the header receive queue.
16092 * @drq: The queue structure to use to create the data receive queue.
16093 * @cq: The completion queue to bind this work queue to.
16094 * @subtype: The subtype of the work queue indicating its functionality.
16095 *
16096 * This function creates a receive buffer queue pair , as detailed in @hrq and
16097 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
16098 * to the HBA.
16099 *
16100 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
16101 * struct is used to get the entry count that is necessary to determine the
16102 * number of pages to use for this queue. The @cq is used to indicate which
16103 * completion queue to bind received buffers that are posted to these queues to.
16104 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16105 * receive queue pair. This function is asynchronous and will wait for the
16106 * mailbox command to finish before continuing.
16107 *
16108 * On success this function will return a zero. If unable to allocate enough
16109 * memory this function will return -ENOMEM. If the queue create mailbox command
16110 * fails this function will return -ENXIO.
16111 **/
16112 int
lpfc_rq_create(struct lpfc_hba * phba,struct lpfc_queue * hrq,struct lpfc_queue * drq,struct lpfc_queue * cq,uint32_t subtype)16113 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16114 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16115 {
16116 struct lpfc_mbx_rq_create *rq_create;
16117 struct lpfc_dmabuf *dmabuf;
16118 LPFC_MBOXQ_t *mbox;
16119 int rc, length, status = 0;
16120 uint32_t shdr_status, shdr_add_status;
16121 union lpfc_sli4_cfg_shdr *shdr;
16122 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16123 void __iomem *bar_memmap_p;
16124 uint32_t db_offset;
16125 uint16_t pci_barset;
16126
16127 /* sanity check on queue memory */
16128 if (!hrq || !drq || !cq)
16129 return -ENODEV;
16130 if (!phba->sli4_hba.pc_sli4_params.supported)
16131 hw_page_size = SLI4_PAGE_SIZE;
16132
16133 if (hrq->entry_count != drq->entry_count)
16134 return -EINVAL;
16135 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16136 if (!mbox)
16137 return -ENOMEM;
16138 length = (sizeof(struct lpfc_mbx_rq_create) -
16139 sizeof(struct lpfc_sli4_cfg_mhdr));
16140 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16141 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16142 length, LPFC_SLI4_MBX_EMBED);
16143 rq_create = &mbox->u.mqe.un.rq_create;
16144 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16145 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16146 phba->sli4_hba.pc_sli4_params.rqv);
16147 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16148 bf_set(lpfc_rq_context_rqe_count_1,
16149 &rq_create->u.request.context,
16150 hrq->entry_count);
16151 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16152 bf_set(lpfc_rq_context_rqe_size,
16153 &rq_create->u.request.context,
16154 LPFC_RQE_SIZE_8);
16155 bf_set(lpfc_rq_context_page_size,
16156 &rq_create->u.request.context,
16157 LPFC_RQ_PAGE_SIZE_4096);
16158 } else {
16159 switch (hrq->entry_count) {
16160 default:
16161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16162 "2535 Unsupported RQ count. (%d)\n",
16163 hrq->entry_count);
16164 if (hrq->entry_count < 512) {
16165 status = -EINVAL;
16166 goto out;
16167 }
16168 fallthrough; /* otherwise default to smallest count */
16169 case 512:
16170 bf_set(lpfc_rq_context_rqe_count,
16171 &rq_create->u.request.context,
16172 LPFC_RQ_RING_SIZE_512);
16173 break;
16174 case 1024:
16175 bf_set(lpfc_rq_context_rqe_count,
16176 &rq_create->u.request.context,
16177 LPFC_RQ_RING_SIZE_1024);
16178 break;
16179 case 2048:
16180 bf_set(lpfc_rq_context_rqe_count,
16181 &rq_create->u.request.context,
16182 LPFC_RQ_RING_SIZE_2048);
16183 break;
16184 case 4096:
16185 bf_set(lpfc_rq_context_rqe_count,
16186 &rq_create->u.request.context,
16187 LPFC_RQ_RING_SIZE_4096);
16188 break;
16189 }
16190 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16191 LPFC_HDR_BUF_SIZE);
16192 }
16193 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16194 cq->queue_id);
16195 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16196 hrq->page_count);
16197 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16198 memset(dmabuf->virt, 0, hw_page_size);
16199 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16200 putPaddrLow(dmabuf->phys);
16201 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16202 putPaddrHigh(dmabuf->phys);
16203 }
16204 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16205 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16206
16207 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16208 /* The IOCTL status is embedded in the mailbox subheader. */
16209 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16210 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16211 if (shdr_status || shdr_add_status || rc) {
16212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16213 "2504 RQ_CREATE mailbox failed with "
16214 "status x%x add_status x%x, mbx status x%x\n",
16215 shdr_status, shdr_add_status, rc);
16216 status = -ENXIO;
16217 goto out;
16218 }
16219 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16220 if (hrq->queue_id == 0xFFFF) {
16221 status = -ENXIO;
16222 goto out;
16223 }
16224
16225 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16226 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16227 &rq_create->u.response);
16228 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16229 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16230 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16231 "3262 RQ [%d] doorbell format not "
16232 "supported: x%x\n", hrq->queue_id,
16233 hrq->db_format);
16234 status = -EINVAL;
16235 goto out;
16236 }
16237
16238 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16239 &rq_create->u.response);
16240 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16241 if (!bar_memmap_p) {
16242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16243 "3269 RQ[%d] failed to memmap pci "
16244 "barset:x%x\n", hrq->queue_id,
16245 pci_barset);
16246 status = -ENOMEM;
16247 goto out;
16248 }
16249
16250 db_offset = rq_create->u.response.doorbell_offset;
16251 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16252 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16253 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16254 "3270 RQ[%d] doorbell offset not "
16255 "supported: x%x\n", hrq->queue_id,
16256 db_offset);
16257 status = -EINVAL;
16258 goto out;
16259 }
16260 hrq->db_regaddr = bar_memmap_p + db_offset;
16261 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16262 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16263 "format:x%x\n", hrq->queue_id, pci_barset,
16264 db_offset, hrq->db_format);
16265 } else {
16266 hrq->db_format = LPFC_DB_RING_FORMAT;
16267 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16268 }
16269 hrq->type = LPFC_HRQ;
16270 hrq->assoc_qid = cq->queue_id;
16271 hrq->subtype = subtype;
16272 hrq->host_index = 0;
16273 hrq->hba_index = 0;
16274 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16275
16276 /* now create the data queue */
16277 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16278 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16279 length, LPFC_SLI4_MBX_EMBED);
16280 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16281 phba->sli4_hba.pc_sli4_params.rqv);
16282 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16283 bf_set(lpfc_rq_context_rqe_count_1,
16284 &rq_create->u.request.context, hrq->entry_count);
16285 if (subtype == LPFC_NVMET)
16286 rq_create->u.request.context.buffer_size =
16287 LPFC_NVMET_DATA_BUF_SIZE;
16288 else
16289 rq_create->u.request.context.buffer_size =
16290 LPFC_DATA_BUF_SIZE;
16291 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16292 LPFC_RQE_SIZE_8);
16293 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16294 (PAGE_SIZE/SLI4_PAGE_SIZE));
16295 } else {
16296 switch (drq->entry_count) {
16297 default:
16298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16299 "2536 Unsupported RQ count. (%d)\n",
16300 drq->entry_count);
16301 if (drq->entry_count < 512) {
16302 status = -EINVAL;
16303 goto out;
16304 }
16305 fallthrough; /* otherwise default to smallest count */
16306 case 512:
16307 bf_set(lpfc_rq_context_rqe_count,
16308 &rq_create->u.request.context,
16309 LPFC_RQ_RING_SIZE_512);
16310 break;
16311 case 1024:
16312 bf_set(lpfc_rq_context_rqe_count,
16313 &rq_create->u.request.context,
16314 LPFC_RQ_RING_SIZE_1024);
16315 break;
16316 case 2048:
16317 bf_set(lpfc_rq_context_rqe_count,
16318 &rq_create->u.request.context,
16319 LPFC_RQ_RING_SIZE_2048);
16320 break;
16321 case 4096:
16322 bf_set(lpfc_rq_context_rqe_count,
16323 &rq_create->u.request.context,
16324 LPFC_RQ_RING_SIZE_4096);
16325 break;
16326 }
16327 if (subtype == LPFC_NVMET)
16328 bf_set(lpfc_rq_context_buf_size,
16329 &rq_create->u.request.context,
16330 LPFC_NVMET_DATA_BUF_SIZE);
16331 else
16332 bf_set(lpfc_rq_context_buf_size,
16333 &rq_create->u.request.context,
16334 LPFC_DATA_BUF_SIZE);
16335 }
16336 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16337 cq->queue_id);
16338 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16339 drq->page_count);
16340 list_for_each_entry(dmabuf, &drq->page_list, list) {
16341 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16342 putPaddrLow(dmabuf->phys);
16343 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16344 putPaddrHigh(dmabuf->phys);
16345 }
16346 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16347 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16348 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16349 /* The IOCTL status is embedded in the mailbox subheader. */
16350 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16351 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16352 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16353 if (shdr_status || shdr_add_status || rc) {
16354 status = -ENXIO;
16355 goto out;
16356 }
16357 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16358 if (drq->queue_id == 0xFFFF) {
16359 status = -ENXIO;
16360 goto out;
16361 }
16362 drq->type = LPFC_DRQ;
16363 drq->assoc_qid = cq->queue_id;
16364 drq->subtype = subtype;
16365 drq->host_index = 0;
16366 drq->hba_index = 0;
16367 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16368
16369 /* link the header and data RQs onto the parent cq child list */
16370 list_add_tail(&hrq->list, &cq->child_list);
16371 list_add_tail(&drq->list, &cq->child_list);
16372
16373 out:
16374 mempool_free(mbox, phba->mbox_mem_pool);
16375 return status;
16376 }
16377
16378 /**
16379 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
16380 * @phba: HBA structure that indicates port to create a queue on.
16381 * @hrqp: The queue structure array to use to create the header receive queues.
16382 * @drqp: The queue structure array to use to create the data receive queues.
16383 * @cqp: The completion queue array to bind these receive queues to.
16384 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16385 *
16386 * This function creates a receive buffer queue pair , as detailed in @hrq and
16387 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
16388 * to the HBA.
16389 *
16390 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
16391 * struct is used to get the entry count that is necessary to determine the
16392 * number of pages to use for this queue. The @cq is used to indicate which
16393 * completion queue to bind received buffers that are posted to these queues to.
16394 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
16395 * receive queue pair. This function is asynchronous and will wait for the
16396 * mailbox command to finish before continuing.
16397 *
16398 * On success this function will return a zero. If unable to allocate enough
16399 * memory this function will return -ENOMEM. If the queue create mailbox command
16400 * fails this function will return -ENXIO.
16401 **/
16402 int
lpfc_mrq_create(struct lpfc_hba * phba,struct lpfc_queue ** hrqp,struct lpfc_queue ** drqp,struct lpfc_queue ** cqp,uint32_t subtype)16403 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16404 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16405 uint32_t subtype)
16406 {
16407 struct lpfc_queue *hrq, *drq, *cq;
16408 struct lpfc_mbx_rq_create_v2 *rq_create;
16409 struct lpfc_dmabuf *dmabuf;
16410 LPFC_MBOXQ_t *mbox;
16411 int rc, length, alloclen, status = 0;
16412 int cnt, idx, numrq, page_idx = 0;
16413 uint32_t shdr_status, shdr_add_status;
16414 union lpfc_sli4_cfg_shdr *shdr;
16415 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16416
16417 numrq = phba->cfg_nvmet_mrq;
16418 /* sanity check on array memory */
16419 if (!hrqp || !drqp || !cqp || !numrq)
16420 return -ENODEV;
16421 if (!phba->sli4_hba.pc_sli4_params.supported)
16422 hw_page_size = SLI4_PAGE_SIZE;
16423
16424 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16425 if (!mbox)
16426 return -ENOMEM;
16427
16428 length = sizeof(struct lpfc_mbx_rq_create_v2);
16429 length += ((2 * numrq * hrqp[0]->page_count) *
16430 sizeof(struct dma_address));
16431
16432 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16433 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16434 LPFC_SLI4_MBX_NEMBED);
16435 if (alloclen < length) {
16436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16437 "3099 Allocated DMA memory size (%d) is "
16438 "less than the requested DMA memory size "
16439 "(%d)\n", alloclen, length);
16440 status = -ENOMEM;
16441 goto out;
16442 }
16443
16444
16445
16446 rq_create = mbox->sge_array->addr[0];
16447 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16448
16449 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16450 cnt = 0;
16451
16452 for (idx = 0; idx < numrq; idx++) {
16453 hrq = hrqp[idx];
16454 drq = drqp[idx];
16455 cq = cqp[idx];
16456
16457 /* sanity check on queue memory */
16458 if (!hrq || !drq || !cq) {
16459 status = -ENODEV;
16460 goto out;
16461 }
16462
16463 if (hrq->entry_count != drq->entry_count) {
16464 status = -EINVAL;
16465 goto out;
16466 }
16467
16468 if (idx == 0) {
16469 bf_set(lpfc_mbx_rq_create_num_pages,
16470 &rq_create->u.request,
16471 hrq->page_count);
16472 bf_set(lpfc_mbx_rq_create_rq_cnt,
16473 &rq_create->u.request, (numrq * 2));
16474 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16475 1);
16476 bf_set(lpfc_rq_context_base_cq,
16477 &rq_create->u.request.context,
16478 cq->queue_id);
16479 bf_set(lpfc_rq_context_data_size,
16480 &rq_create->u.request.context,
16481 LPFC_NVMET_DATA_BUF_SIZE);
16482 bf_set(lpfc_rq_context_hdr_size,
16483 &rq_create->u.request.context,
16484 LPFC_HDR_BUF_SIZE);
16485 bf_set(lpfc_rq_context_rqe_count_1,
16486 &rq_create->u.request.context,
16487 hrq->entry_count);
16488 bf_set(lpfc_rq_context_rqe_size,
16489 &rq_create->u.request.context,
16490 LPFC_RQE_SIZE_8);
16491 bf_set(lpfc_rq_context_page_size,
16492 &rq_create->u.request.context,
16493 (PAGE_SIZE/SLI4_PAGE_SIZE));
16494 }
16495 rc = 0;
16496 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16497 memset(dmabuf->virt, 0, hw_page_size);
16498 cnt = page_idx + dmabuf->buffer_tag;
16499 rq_create->u.request.page[cnt].addr_lo =
16500 putPaddrLow(dmabuf->phys);
16501 rq_create->u.request.page[cnt].addr_hi =
16502 putPaddrHigh(dmabuf->phys);
16503 rc++;
16504 }
16505 page_idx += rc;
16506
16507 rc = 0;
16508 list_for_each_entry(dmabuf, &drq->page_list, list) {
16509 memset(dmabuf->virt, 0, hw_page_size);
16510 cnt = page_idx + dmabuf->buffer_tag;
16511 rq_create->u.request.page[cnt].addr_lo =
16512 putPaddrLow(dmabuf->phys);
16513 rq_create->u.request.page[cnt].addr_hi =
16514 putPaddrHigh(dmabuf->phys);
16515 rc++;
16516 }
16517 page_idx += rc;
16518
16519 hrq->db_format = LPFC_DB_RING_FORMAT;
16520 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16521 hrq->type = LPFC_HRQ;
16522 hrq->assoc_qid = cq->queue_id;
16523 hrq->subtype = subtype;
16524 hrq->host_index = 0;
16525 hrq->hba_index = 0;
16526 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16527
16528 drq->db_format = LPFC_DB_RING_FORMAT;
16529 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16530 drq->type = LPFC_DRQ;
16531 drq->assoc_qid = cq->queue_id;
16532 drq->subtype = subtype;
16533 drq->host_index = 0;
16534 drq->hba_index = 0;
16535 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16536
16537 list_add_tail(&hrq->list, &cq->child_list);
16538 list_add_tail(&drq->list, &cq->child_list);
16539 }
16540
16541 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16542 /* The IOCTL status is embedded in the mailbox subheader. */
16543 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16544 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16545 if (shdr_status || shdr_add_status || rc) {
16546 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16547 "3120 RQ_CREATE mailbox failed with "
16548 "status x%x add_status x%x, mbx status x%x\n",
16549 shdr_status, shdr_add_status, rc);
16550 status = -ENXIO;
16551 goto out;
16552 }
16553 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16554 if (rc == 0xFFFF) {
16555 status = -ENXIO;
16556 goto out;
16557 }
16558
16559 /* Initialize all RQs with associated queue id */
16560 for (idx = 0; idx < numrq; idx++) {
16561 hrq = hrqp[idx];
16562 hrq->queue_id = rc + (2 * idx);
16563 drq = drqp[idx];
16564 drq->queue_id = rc + (2 * idx) + 1;
16565 }
16566
16567 out:
16568 lpfc_sli4_mbox_cmd_free(phba, mbox);
16569 return status;
16570 }
16571
16572 /**
16573 * lpfc_eq_destroy - Destroy an event Queue on the HBA
16574 * @phba: HBA structure that indicates port to destroy a queue on.
16575 * @eq: The queue structure associated with the queue to destroy.
16576 *
16577 * This function destroys a queue, as detailed in @eq by sending an mailbox
16578 * command, specific to the type of queue, to the HBA.
16579 *
16580 * The @eq struct is used to get the queue ID of the queue to destroy.
16581 *
16582 * On success this function will return a zero. If the queue destroy mailbox
16583 * command fails this function will return -ENXIO.
16584 **/
16585 int
lpfc_eq_destroy(struct lpfc_hba * phba,struct lpfc_queue * eq)16586 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16587 {
16588 LPFC_MBOXQ_t *mbox;
16589 int rc, length, status = 0;
16590 uint32_t shdr_status, shdr_add_status;
16591 union lpfc_sli4_cfg_shdr *shdr;
16592
16593 /* sanity check on queue memory */
16594 if (!eq)
16595 return -ENODEV;
16596
16597 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16598 if (!mbox)
16599 return -ENOMEM;
16600 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16601 sizeof(struct lpfc_sli4_cfg_mhdr));
16602 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16603 LPFC_MBOX_OPCODE_EQ_DESTROY,
16604 length, LPFC_SLI4_MBX_EMBED);
16605 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16606 eq->queue_id);
16607 mbox->vport = eq->phba->pport;
16608 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16609
16610 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16611 /* The IOCTL status is embedded in the mailbox subheader. */
16612 shdr = (union lpfc_sli4_cfg_shdr *)
16613 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16614 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16615 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16616 if (shdr_status || shdr_add_status || rc) {
16617 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16618 "2505 EQ_DESTROY mailbox failed with "
16619 "status x%x add_status x%x, mbx status x%x\n",
16620 shdr_status, shdr_add_status, rc);
16621 status = -ENXIO;
16622 }
16623
16624 /* Remove eq from any list */
16625 list_del_init(&eq->list);
16626 mempool_free(mbox, eq->phba->mbox_mem_pool);
16627 return status;
16628 }
16629
16630 /**
16631 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
16632 * @phba: HBA structure that indicates port to destroy a queue on.
16633 * @cq: The queue structure associated with the queue to destroy.
16634 *
16635 * This function destroys a queue, as detailed in @cq by sending an mailbox
16636 * command, specific to the type of queue, to the HBA.
16637 *
16638 * The @cq struct is used to get the queue ID of the queue to destroy.
16639 *
16640 * On success this function will return a zero. If the queue destroy mailbox
16641 * command fails this function will return -ENXIO.
16642 **/
16643 int
lpfc_cq_destroy(struct lpfc_hba * phba,struct lpfc_queue * cq)16644 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16645 {
16646 LPFC_MBOXQ_t *mbox;
16647 int rc, length, status = 0;
16648 uint32_t shdr_status, shdr_add_status;
16649 union lpfc_sli4_cfg_shdr *shdr;
16650
16651 /* sanity check on queue memory */
16652 if (!cq)
16653 return -ENODEV;
16654 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16655 if (!mbox)
16656 return -ENOMEM;
16657 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16658 sizeof(struct lpfc_sli4_cfg_mhdr));
16659 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16660 LPFC_MBOX_OPCODE_CQ_DESTROY,
16661 length, LPFC_SLI4_MBX_EMBED);
16662 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16663 cq->queue_id);
16664 mbox->vport = cq->phba->pport;
16665 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16666 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16667 /* The IOCTL status is embedded in the mailbox subheader. */
16668 shdr = (union lpfc_sli4_cfg_shdr *)
16669 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16670 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16671 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16672 if (shdr_status || shdr_add_status || rc) {
16673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16674 "2506 CQ_DESTROY mailbox failed with "
16675 "status x%x add_status x%x, mbx status x%x\n",
16676 shdr_status, shdr_add_status, rc);
16677 status = -ENXIO;
16678 }
16679 /* Remove cq from any list */
16680 list_del_init(&cq->list);
16681 mempool_free(mbox, cq->phba->mbox_mem_pool);
16682 return status;
16683 }
16684
16685 /**
16686 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
16687 * @phba: HBA structure that indicates port to destroy a queue on.
16688 * @mq: The queue structure associated with the queue to destroy.
16689 *
16690 * This function destroys a queue, as detailed in @mq by sending an mailbox
16691 * command, specific to the type of queue, to the HBA.
16692 *
16693 * The @mq struct is used to get the queue ID of the queue to destroy.
16694 *
16695 * On success this function will return a zero. If the queue destroy mailbox
16696 * command fails this function will return -ENXIO.
16697 **/
16698 int
lpfc_mq_destroy(struct lpfc_hba * phba,struct lpfc_queue * mq)16699 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16700 {
16701 LPFC_MBOXQ_t *mbox;
16702 int rc, length, status = 0;
16703 uint32_t shdr_status, shdr_add_status;
16704 union lpfc_sli4_cfg_shdr *shdr;
16705
16706 /* sanity check on queue memory */
16707 if (!mq)
16708 return -ENODEV;
16709 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16710 if (!mbox)
16711 return -ENOMEM;
16712 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16713 sizeof(struct lpfc_sli4_cfg_mhdr));
16714 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16715 LPFC_MBOX_OPCODE_MQ_DESTROY,
16716 length, LPFC_SLI4_MBX_EMBED);
16717 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16718 mq->queue_id);
16719 mbox->vport = mq->phba->pport;
16720 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16721 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16722 /* The IOCTL status is embedded in the mailbox subheader. */
16723 shdr = (union lpfc_sli4_cfg_shdr *)
16724 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16725 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16726 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16727 if (shdr_status || shdr_add_status || rc) {
16728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16729 "2507 MQ_DESTROY mailbox failed with "
16730 "status x%x add_status x%x, mbx status x%x\n",
16731 shdr_status, shdr_add_status, rc);
16732 status = -ENXIO;
16733 }
16734 /* Remove mq from any list */
16735 list_del_init(&mq->list);
16736 mempool_free(mbox, mq->phba->mbox_mem_pool);
16737 return status;
16738 }
16739
16740 /**
16741 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
16742 * @phba: HBA structure that indicates port to destroy a queue on.
16743 * @wq: The queue structure associated with the queue to destroy.
16744 *
16745 * This function destroys a queue, as detailed in @wq by sending an mailbox
16746 * command, specific to the type of queue, to the HBA.
16747 *
16748 * The @wq struct is used to get the queue ID of the queue to destroy.
16749 *
16750 * On success this function will return a zero. If the queue destroy mailbox
16751 * command fails this function will return -ENXIO.
16752 **/
16753 int
lpfc_wq_destroy(struct lpfc_hba * phba,struct lpfc_queue * wq)16754 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16755 {
16756 LPFC_MBOXQ_t *mbox;
16757 int rc, length, status = 0;
16758 uint32_t shdr_status, shdr_add_status;
16759 union lpfc_sli4_cfg_shdr *shdr;
16760
16761 /* sanity check on queue memory */
16762 if (!wq)
16763 return -ENODEV;
16764 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16765 if (!mbox)
16766 return -ENOMEM;
16767 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16768 sizeof(struct lpfc_sli4_cfg_mhdr));
16769 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16770 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16771 length, LPFC_SLI4_MBX_EMBED);
16772 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16773 wq->queue_id);
16774 mbox->vport = wq->phba->pport;
16775 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16776 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16777 shdr = (union lpfc_sli4_cfg_shdr *)
16778 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16779 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16780 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16781 if (shdr_status || shdr_add_status || rc) {
16782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16783 "2508 WQ_DESTROY mailbox failed with "
16784 "status x%x add_status x%x, mbx status x%x\n",
16785 shdr_status, shdr_add_status, rc);
16786 status = -ENXIO;
16787 }
16788 /* Remove wq from any list */
16789 list_del_init(&wq->list);
16790 kfree(wq->pring);
16791 wq->pring = NULL;
16792 mempool_free(mbox, wq->phba->mbox_mem_pool);
16793 return status;
16794 }
16795
16796 /**
16797 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
16798 * @phba: HBA structure that indicates port to destroy a queue on.
16799 * @hrq: The queue structure associated with the queue to destroy.
16800 * @drq: The queue structure associated with the queue to destroy.
16801 *
16802 * This function destroys a queue, as detailed in @rq by sending an mailbox
16803 * command, specific to the type of queue, to the HBA.
16804 *
16805 * The @rq struct is used to get the queue ID of the queue to destroy.
16806 *
16807 * On success this function will return a zero. If the queue destroy mailbox
16808 * command fails this function will return -ENXIO.
16809 **/
16810 int
lpfc_rq_destroy(struct lpfc_hba * phba,struct lpfc_queue * hrq,struct lpfc_queue * drq)16811 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16812 struct lpfc_queue *drq)
16813 {
16814 LPFC_MBOXQ_t *mbox;
16815 int rc, length, status = 0;
16816 uint32_t shdr_status, shdr_add_status;
16817 union lpfc_sli4_cfg_shdr *shdr;
16818
16819 /* sanity check on queue memory */
16820 if (!hrq || !drq)
16821 return -ENODEV;
16822 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16823 if (!mbox)
16824 return -ENOMEM;
16825 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16826 sizeof(struct lpfc_sli4_cfg_mhdr));
16827 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16828 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16829 length, LPFC_SLI4_MBX_EMBED);
16830 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16831 hrq->queue_id);
16832 mbox->vport = hrq->phba->pport;
16833 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16834 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16835 /* The IOCTL status is embedded in the mailbox subheader. */
16836 shdr = (union lpfc_sli4_cfg_shdr *)
16837 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16838 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16839 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16840 if (shdr_status || shdr_add_status || rc) {
16841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16842 "2509 RQ_DESTROY mailbox failed with "
16843 "status x%x add_status x%x, mbx status x%x\n",
16844 shdr_status, shdr_add_status, rc);
16845 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16846 return -ENXIO;
16847 }
16848 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16849 drq->queue_id);
16850 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16851 shdr = (union lpfc_sli4_cfg_shdr *)
16852 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16853 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16854 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16855 if (shdr_status || shdr_add_status || rc) {
16856 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16857 "2510 RQ_DESTROY mailbox failed with "
16858 "status x%x add_status x%x, mbx status x%x\n",
16859 shdr_status, shdr_add_status, rc);
16860 status = -ENXIO;
16861 }
16862 list_del_init(&hrq->list);
16863 list_del_init(&drq->list);
16864 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16865 return status;
16866 }
16867
16868 /**
16869 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16870 * @phba: The virtual port for which this call being executed.
16871 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16872 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16873 * @xritag: the xritag that ties this io to the SGL pages.
16874 *
16875 * This routine will post the sgl pages for the IO that has the xritag
16876 * that is in the iocbq structure. The xritag is assigned during iocbq
16877 * creation and persists for as long as the driver is loaded.
16878 * if the caller has fewer than 256 scatter gather segments to map then
16879 * pdma_phys_addr1 should be 0.
16880 * If the caller needs to map more than 256 scatter gather segment then
16881 * pdma_phys_addr1 should be a valid physical address.
16882 * physical address for SGLs must be 64 byte aligned.
16883 * If you are going to map 2 SGL's then the first one must have 256 entries
16884 * the second sgl can have between 1 and 256 entries.
16885 *
16886 * Return codes:
16887 * 0 - Success
16888 * -ENXIO, -ENOMEM - Failure
16889 **/
16890 int
lpfc_sli4_post_sgl(struct lpfc_hba * phba,dma_addr_t pdma_phys_addr0,dma_addr_t pdma_phys_addr1,uint16_t xritag)16891 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16892 dma_addr_t pdma_phys_addr0,
16893 dma_addr_t pdma_phys_addr1,
16894 uint16_t xritag)
16895 {
16896 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16897 LPFC_MBOXQ_t *mbox;
16898 int rc;
16899 uint32_t shdr_status, shdr_add_status;
16900 uint32_t mbox_tmo;
16901 union lpfc_sli4_cfg_shdr *shdr;
16902
16903 if (xritag == NO_XRI) {
16904 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16905 "0364 Invalid param:\n");
16906 return -EINVAL;
16907 }
16908
16909 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16910 if (!mbox)
16911 return -ENOMEM;
16912
16913 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16914 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16915 sizeof(struct lpfc_mbx_post_sgl_pages) -
16916 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16917
16918 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16919 &mbox->u.mqe.un.post_sgl_pages;
16920 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16921 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16922
16923 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16924 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16925 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16926 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16927
16928 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16929 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16930 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16931 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16932 if (!phba->sli4_hba.intr_enable)
16933 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16934 else {
16935 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16936 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16937 }
16938 /* The IOCTL status is embedded in the mailbox subheader. */
16939 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16940 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16941 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16942 if (!phba->sli4_hba.intr_enable)
16943 mempool_free(mbox, phba->mbox_mem_pool);
16944 else if (rc != MBX_TIMEOUT)
16945 mempool_free(mbox, phba->mbox_mem_pool);
16946 if (shdr_status || shdr_add_status || rc) {
16947 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16948 "2511 POST_SGL mailbox failed with "
16949 "status x%x add_status x%x, mbx status x%x\n",
16950 shdr_status, shdr_add_status, rc);
16951 }
16952 return 0;
16953 }
16954
16955 /**
16956 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
16957 * @phba: pointer to lpfc hba data structure.
16958 *
16959 * This routine is invoked to post rpi header templates to the
16960 * HBA consistent with the SLI-4 interface spec. This routine
16961 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
16962 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
16963 *
16964 * Returns
16965 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
16966 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
16967 **/
16968 static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba * phba)16969 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16970 {
16971 unsigned long xri;
16972
16973 /*
16974 * Fetch the next logical xri. Because this index is logical,
16975 * the driver starts at 0 each time.
16976 */
16977 spin_lock_irq(&phba->hbalock);
16978 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16979 phba->sli4_hba.max_cfg_param.max_xri, 0);
16980 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16981 spin_unlock_irq(&phba->hbalock);
16982 return NO_XRI;
16983 } else {
16984 set_bit(xri, phba->sli4_hba.xri_bmask);
16985 phba->sli4_hba.max_cfg_param.xri_used++;
16986 }
16987 spin_unlock_irq(&phba->hbalock);
16988 return xri;
16989 }
16990
16991 /**
16992 * lpfc_sli4_free_xri - Release an xri for reuse.
16993 * @phba: pointer to lpfc hba data structure.
16994 * @xri: xri to release.
16995 *
16996 * This routine is invoked to release an xri to the pool of
16997 * available rpis maintained by the driver.
16998 **/
16999 static void
__lpfc_sli4_free_xri(struct lpfc_hba * phba,int xri)17000 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17001 {
17002 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17003 phba->sli4_hba.max_cfg_param.xri_used--;
17004 }
17005 }
17006
17007 /**
17008 * lpfc_sli4_free_xri - Release an xri for reuse.
17009 * @phba: pointer to lpfc hba data structure.
17010 * @xri: xri to release.
17011 *
17012 * This routine is invoked to release an xri to the pool of
17013 * available rpis maintained by the driver.
17014 **/
17015 void
lpfc_sli4_free_xri(struct lpfc_hba * phba,int xri)17016 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17017 {
17018 spin_lock_irq(&phba->hbalock);
17019 __lpfc_sli4_free_xri(phba, xri);
17020 spin_unlock_irq(&phba->hbalock);
17021 }
17022
17023 /**
17024 * lpfc_sli4_next_xritag - Get an xritag for the io
17025 * @phba: Pointer to HBA context object.
17026 *
17027 * This function gets an xritag for the iocb. If there is no unused xritag
17028 * it will return 0xffff.
17029 * The function returns the allocated xritag if successful, else returns zero.
17030 * Zero is not a valid xritag.
17031 * The caller is not required to hold any lock.
17032 **/
17033 uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba * phba)17034 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17035 {
17036 uint16_t xri_index;
17037
17038 xri_index = lpfc_sli4_alloc_xri(phba);
17039 if (xri_index == NO_XRI)
17040 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17041 "2004 Failed to allocate XRI.last XRITAG is %d"
17042 " Max XRI is %d, Used XRI is %d\n",
17043 xri_index,
17044 phba->sli4_hba.max_cfg_param.max_xri,
17045 phba->sli4_hba.max_cfg_param.xri_used);
17046 return xri_index;
17047 }
17048
17049 /**
17050 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17051 * @phba: pointer to lpfc hba data structure.
17052 * @post_sgl_list: pointer to els sgl entry list.
17053 * @post_cnt: number of els sgl entries on the list.
17054 *
17055 * This routine is invoked to post a block of driver's sgl pages to the
17056 * HBA using non-embedded mailbox command. No Lock is held. This routine
17057 * is only called when the driver is loading and after all IO has been
17058 * stopped.
17059 **/
17060 static int
lpfc_sli4_post_sgl_list(struct lpfc_hba * phba,struct list_head * post_sgl_list,int post_cnt)17061 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17062 struct list_head *post_sgl_list,
17063 int post_cnt)
17064 {
17065 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17066 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17067 struct sgl_page_pairs *sgl_pg_pairs;
17068 void *viraddr;
17069 LPFC_MBOXQ_t *mbox;
17070 uint32_t reqlen, alloclen, pg_pairs;
17071 uint32_t mbox_tmo;
17072 uint16_t xritag_start = 0;
17073 int rc = 0;
17074 uint32_t shdr_status, shdr_add_status;
17075 union lpfc_sli4_cfg_shdr *shdr;
17076
17077 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17078 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17079 if (reqlen > SLI4_PAGE_SIZE) {
17080 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17081 "2559 Block sgl registration required DMA "
17082 "size (%d) great than a page\n", reqlen);
17083 return -ENOMEM;
17084 }
17085
17086 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17087 if (!mbox)
17088 return -ENOMEM;
17089
17090 /* Allocate DMA memory and set up the non-embedded mailbox command */
17091 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17092 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17093 LPFC_SLI4_MBX_NEMBED);
17094
17095 if (alloclen < reqlen) {
17096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17097 "0285 Allocated DMA memory size (%d) is "
17098 "less than the requested DMA memory "
17099 "size (%d)\n", alloclen, reqlen);
17100 lpfc_sli4_mbox_cmd_free(phba, mbox);
17101 return -ENOMEM;
17102 }
17103 /* Set up the SGL pages in the non-embedded DMA pages */
17104 viraddr = mbox->sge_array->addr[0];
17105 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17106 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17107
17108 pg_pairs = 0;
17109 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17110 /* Set up the sge entry */
17111 sgl_pg_pairs->sgl_pg0_addr_lo =
17112 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17113 sgl_pg_pairs->sgl_pg0_addr_hi =
17114 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17115 sgl_pg_pairs->sgl_pg1_addr_lo =
17116 cpu_to_le32(putPaddrLow(0));
17117 sgl_pg_pairs->sgl_pg1_addr_hi =
17118 cpu_to_le32(putPaddrHigh(0));
17119
17120 /* Keep the first xritag on the list */
17121 if (pg_pairs == 0)
17122 xritag_start = sglq_entry->sli4_xritag;
17123 sgl_pg_pairs++;
17124 pg_pairs++;
17125 }
17126
17127 /* Complete initialization and perform endian conversion. */
17128 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17129 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17130 sgl->word0 = cpu_to_le32(sgl->word0);
17131
17132 if (!phba->sli4_hba.intr_enable)
17133 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17134 else {
17135 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17136 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17137 }
17138 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17139 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17140 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17141 if (!phba->sli4_hba.intr_enable)
17142 lpfc_sli4_mbox_cmd_free(phba, mbox);
17143 else if (rc != MBX_TIMEOUT)
17144 lpfc_sli4_mbox_cmd_free(phba, mbox);
17145 if (shdr_status || shdr_add_status || rc) {
17146 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17147 "2513 POST_SGL_BLOCK mailbox command failed "
17148 "status x%x add_status x%x mbx status x%x\n",
17149 shdr_status, shdr_add_status, rc);
17150 rc = -ENXIO;
17151 }
17152 return rc;
17153 }
17154
17155 /**
17156 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
17157 * @phba: pointer to lpfc hba data structure.
17158 * @nblist: pointer to nvme buffer list.
17159 * @count: number of scsi buffers on the list.
17160 *
17161 * This routine is invoked to post a block of @count scsi sgl pages from a
17162 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
17163 * No Lock is held.
17164 *
17165 **/
17166 static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba * phba,struct list_head * nblist,int count)17167 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17168 int count)
17169 {
17170 struct lpfc_io_buf *lpfc_ncmd;
17171 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17172 struct sgl_page_pairs *sgl_pg_pairs;
17173 void *viraddr;
17174 LPFC_MBOXQ_t *mbox;
17175 uint32_t reqlen, alloclen, pg_pairs;
17176 uint32_t mbox_tmo;
17177 uint16_t xritag_start = 0;
17178 int rc = 0;
17179 uint32_t shdr_status, shdr_add_status;
17180 dma_addr_t pdma_phys_bpl1;
17181 union lpfc_sli4_cfg_shdr *shdr;
17182
17183 /* Calculate the requested length of the dma memory */
17184 reqlen = count * sizeof(struct sgl_page_pairs) +
17185 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17186 if (reqlen > SLI4_PAGE_SIZE) {
17187 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17188 "6118 Block sgl registration required DMA "
17189 "size (%d) great than a page\n", reqlen);
17190 return -ENOMEM;
17191 }
17192 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17193 if (!mbox) {
17194 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17195 "6119 Failed to allocate mbox cmd memory\n");
17196 return -ENOMEM;
17197 }
17198
17199 /* Allocate DMA memory and set up the non-embedded mailbox command */
17200 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17201 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17202 reqlen, LPFC_SLI4_MBX_NEMBED);
17203
17204 if (alloclen < reqlen) {
17205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17206 "6120 Allocated DMA memory size (%d) is "
17207 "less than the requested DMA memory "
17208 "size (%d)\n", alloclen, reqlen);
17209 lpfc_sli4_mbox_cmd_free(phba, mbox);
17210 return -ENOMEM;
17211 }
17212
17213 /* Get the first SGE entry from the non-embedded DMA memory */
17214 viraddr = mbox->sge_array->addr[0];
17215
17216 /* Set up the SGL pages in the non-embedded DMA pages */
17217 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17218 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17219
17220 pg_pairs = 0;
17221 list_for_each_entry(lpfc_ncmd, nblist, list) {
17222 /* Set up the sge entry */
17223 sgl_pg_pairs->sgl_pg0_addr_lo =
17224 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17225 sgl_pg_pairs->sgl_pg0_addr_hi =
17226 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17227 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17228 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17229 SGL_PAGE_SIZE;
17230 else
17231 pdma_phys_bpl1 = 0;
17232 sgl_pg_pairs->sgl_pg1_addr_lo =
17233 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17234 sgl_pg_pairs->sgl_pg1_addr_hi =
17235 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17236 /* Keep the first xritag on the list */
17237 if (pg_pairs == 0)
17238 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17239 sgl_pg_pairs++;
17240 pg_pairs++;
17241 }
17242 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17243 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17244 /* Perform endian conversion if necessary */
17245 sgl->word0 = cpu_to_le32(sgl->word0);
17246
17247 if (!phba->sli4_hba.intr_enable) {
17248 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17249 } else {
17250 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17251 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17252 }
17253 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17254 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17255 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17256 if (!phba->sli4_hba.intr_enable)
17257 lpfc_sli4_mbox_cmd_free(phba, mbox);
17258 else if (rc != MBX_TIMEOUT)
17259 lpfc_sli4_mbox_cmd_free(phba, mbox);
17260 if (shdr_status || shdr_add_status || rc) {
17261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17262 "6125 POST_SGL_BLOCK mailbox command failed "
17263 "status x%x add_status x%x mbx status x%x\n",
17264 shdr_status, shdr_add_status, rc);
17265 rc = -ENXIO;
17266 }
17267 return rc;
17268 }
17269
17270 /**
17271 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
17272 * @phba: pointer to lpfc hba data structure.
17273 * @post_nblist: pointer to the nvme buffer list.
17274 * @sb_count: number of nvme buffers.
17275 *
17276 * This routine walks a list of nvme buffers that was passed in. It attempts
17277 * to construct blocks of nvme buffer sgls which contains contiguous xris and
17278 * uses the non-embedded SGL block post mailbox commands to post to the port.
17279 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
17280 * embedded SGL post mailbox command for posting. The @post_nblist passed in
17281 * must be local list, thus no lock is needed when manipulate the list.
17282 *
17283 * Returns: 0 = failure, non-zero number of successfully posted buffers.
17284 **/
17285 int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba * phba,struct list_head * post_nblist,int sb_count)17286 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17287 struct list_head *post_nblist, int sb_count)
17288 {
17289 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17290 int status, sgl_size;
17291 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17292 dma_addr_t pdma_phys_sgl1;
17293 int last_xritag = NO_XRI;
17294 int cur_xritag;
17295 LIST_HEAD(prep_nblist);
17296 LIST_HEAD(blck_nblist);
17297 LIST_HEAD(nvme_nblist);
17298
17299 /* sanity check */
17300 if (sb_count <= 0)
17301 return -EINVAL;
17302
17303 sgl_size = phba->cfg_sg_dma_buf_size;
17304 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17305 list_del_init(&lpfc_ncmd->list);
17306 block_cnt++;
17307 if ((last_xritag != NO_XRI) &&
17308 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17309 /* a hole in xri block, form a sgl posting block */
17310 list_splice_init(&prep_nblist, &blck_nblist);
17311 post_cnt = block_cnt - 1;
17312 /* prepare list for next posting block */
17313 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17314 block_cnt = 1;
17315 } else {
17316 /* prepare list for next posting block */
17317 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17318 /* enough sgls for non-embed sgl mbox command */
17319 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17320 list_splice_init(&prep_nblist, &blck_nblist);
17321 post_cnt = block_cnt;
17322 block_cnt = 0;
17323 }
17324 }
17325 num_posting++;
17326 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17327
17328 /* end of repost sgl list condition for NVME buffers */
17329 if (num_posting == sb_count) {
17330 if (post_cnt == 0) {
17331 /* last sgl posting block */
17332 list_splice_init(&prep_nblist, &blck_nblist);
17333 post_cnt = block_cnt;
17334 } else if (block_cnt == 1) {
17335 /* last single sgl with non-contiguous xri */
17336 if (sgl_size > SGL_PAGE_SIZE)
17337 pdma_phys_sgl1 =
17338 lpfc_ncmd->dma_phys_sgl +
17339 SGL_PAGE_SIZE;
17340 else
17341 pdma_phys_sgl1 = 0;
17342 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17343 status = lpfc_sli4_post_sgl(
17344 phba, lpfc_ncmd->dma_phys_sgl,
17345 pdma_phys_sgl1, cur_xritag);
17346 if (status) {
17347 /* Post error. Buffer unavailable. */
17348 lpfc_ncmd->flags |=
17349 LPFC_SBUF_NOT_POSTED;
17350 } else {
17351 /* Post success. Bffer available. */
17352 lpfc_ncmd->flags &=
17353 ~LPFC_SBUF_NOT_POSTED;
17354 lpfc_ncmd->status = IOSTAT_SUCCESS;
17355 num_posted++;
17356 }
17357 /* success, put on NVME buffer sgl list */
17358 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17359 }
17360 }
17361
17362 /* continue until a nembed page worth of sgls */
17363 if (post_cnt == 0)
17364 continue;
17365
17366 /* post block of NVME buffer list sgls */
17367 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17368 post_cnt);
17369
17370 /* don't reset xirtag due to hole in xri block */
17371 if (block_cnt == 0)
17372 last_xritag = NO_XRI;
17373
17374 /* reset NVME buffer post count for next round of posting */
17375 post_cnt = 0;
17376
17377 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17378 while (!list_empty(&blck_nblist)) {
17379 list_remove_head(&blck_nblist, lpfc_ncmd,
17380 struct lpfc_io_buf, list);
17381 if (status) {
17382 /* Post error. Mark buffer unavailable. */
17383 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17384 } else {
17385 /* Post success, Mark buffer available. */
17386 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17387 lpfc_ncmd->status = IOSTAT_SUCCESS;
17388 num_posted++;
17389 }
17390 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17391 }
17392 }
17393 /* Push NVME buffers with sgl posted to the available list */
17394 lpfc_io_buf_replenish(phba, &nvme_nblist);
17395
17396 return num_posted;
17397 }
17398
17399 /**
17400 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17401 * @phba: pointer to lpfc_hba struct that the frame was received on
17402 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17403 *
17404 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17405 * valid type of frame that the LPFC driver will handle. This function will
17406 * return a zero if the frame is a valid frame or a non zero value when the
17407 * frame does not pass the check.
17408 **/
17409 static int
lpfc_fc_frame_check(struct lpfc_hba * phba,struct fc_frame_header * fc_hdr)17410 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17411 {
17412 /* make rctl_names static to save stack space */
17413 struct fc_vft_header *fc_vft_hdr;
17414 uint32_t *header = (uint32_t *) fc_hdr;
17415
17416 #define FC_RCTL_MDS_DIAGS 0xF4
17417
17418 switch (fc_hdr->fh_r_ctl) {
17419 case FC_RCTL_DD_UNCAT: /* uncategorized information */
17420 case FC_RCTL_DD_SOL_DATA: /* solicited data */
17421 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
17422 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
17423 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
17424 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
17425 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
17426 case FC_RCTL_DD_CMD_STATUS: /* command status */
17427 case FC_RCTL_ELS_REQ: /* extended link services request */
17428 case FC_RCTL_ELS_REP: /* extended link services reply */
17429 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
17430 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
17431 case FC_RCTL_BA_NOP: /* basic link service NOP */
17432 case FC_RCTL_BA_ABTS: /* basic link service abort */
17433 case FC_RCTL_BA_RMC: /* remove connection */
17434 case FC_RCTL_BA_ACC: /* basic accept */
17435 case FC_RCTL_BA_RJT: /* basic reject */
17436 case FC_RCTL_BA_PRMT:
17437 case FC_RCTL_ACK_1: /* acknowledge_1 */
17438 case FC_RCTL_ACK_0: /* acknowledge_0 */
17439 case FC_RCTL_P_RJT: /* port reject */
17440 case FC_RCTL_F_RJT: /* fabric reject */
17441 case FC_RCTL_P_BSY: /* port busy */
17442 case FC_RCTL_F_BSY: /* fabric busy to data frame */
17443 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
17444 case FC_RCTL_LCR: /* link credit reset */
17445 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
17446 case FC_RCTL_END: /* end */
17447 break;
17448 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
17449 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17450 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17451 return lpfc_fc_frame_check(phba, fc_hdr);
17452 default:
17453 goto drop;
17454 }
17455
17456 switch (fc_hdr->fh_type) {
17457 case FC_TYPE_BLS:
17458 case FC_TYPE_ELS:
17459 case FC_TYPE_FCP:
17460 case FC_TYPE_CT:
17461 case FC_TYPE_NVME:
17462 break;
17463 case FC_TYPE_IP:
17464 case FC_TYPE_ILS:
17465 default:
17466 goto drop;
17467 }
17468
17469 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17470 "2538 Received frame rctl:x%x, type:x%x, "
17471 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17472 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17473 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17474 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17475 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17476 be32_to_cpu(header[6]));
17477 return 0;
17478 drop:
17479 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17480 "2539 Dropped frame rctl:x%x type:x%x\n",
17481 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17482 return 1;
17483 }
17484
17485 /**
17486 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
17487 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17488 *
17489 * This function processes the FC header to retrieve the VFI from the VF
17490 * header, if one exists. This function will return the VFI if one exists
17491 * or 0 if no VSAN Header exists.
17492 **/
17493 static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header * fc_hdr)17494 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17495 {
17496 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17497
17498 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17499 return 0;
17500 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17501 }
17502
17503 /**
17504 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17505 * @phba: Pointer to the HBA structure to search for the vport on
17506 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17507 * @fcfi: The FC Fabric ID that the frame came from
17508 * @did: Destination ID to match against
17509 *
17510 * This function searches the @phba for a vport that matches the content of the
17511 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17512 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17513 * returns the matching vport pointer or NULL if unable to match frame to a
17514 * vport.
17515 **/
17516 static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba * phba,struct fc_frame_header * fc_hdr,uint16_t fcfi,uint32_t did)17517 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17518 uint16_t fcfi, uint32_t did)
17519 {
17520 struct lpfc_vport **vports;
17521 struct lpfc_vport *vport = NULL;
17522 int i;
17523
17524 if (did == Fabric_DID)
17525 return phba->pport;
17526 if ((phba->pport->fc_flag & FC_PT2PT) &&
17527 !(phba->link_state == LPFC_HBA_READY))
17528 return phba->pport;
17529
17530 vports = lpfc_create_vport_work_array(phba);
17531 if (vports != NULL) {
17532 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17533 if (phba->fcf.fcfi == fcfi &&
17534 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17535 vports[i]->fc_myDID == did) {
17536 vport = vports[i];
17537 break;
17538 }
17539 }
17540 }
17541 lpfc_destroy_vport_work_array(phba, vports);
17542 return vport;
17543 }
17544
17545 /**
17546 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
17547 * @vport: The vport to work on.
17548 *
17549 * This function updates the receive sequence time stamp for this vport. The
17550 * receive sequence time stamp indicates the time that the last frame of the
17551 * the sequence that has been idle for the longest amount of time was received.
17552 * the driver uses this time stamp to indicate if any received sequences have
17553 * timed out.
17554 **/
17555 static void
lpfc_update_rcv_time_stamp(struct lpfc_vport * vport)17556 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17557 {
17558 struct lpfc_dmabuf *h_buf;
17559 struct hbq_dmabuf *dmabuf = NULL;
17560
17561 /* get the oldest sequence on the rcv list */
17562 h_buf = list_get_first(&vport->rcv_buffer_list,
17563 struct lpfc_dmabuf, list);
17564 if (!h_buf)
17565 return;
17566 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17567 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17568 }
17569
17570 /**
17571 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17572 * @vport: The vport that the received sequences were sent to.
17573 *
17574 * This function cleans up all outstanding received sequences. This is called
17575 * by the driver when a link event or user action invalidates all the received
17576 * sequences.
17577 **/
17578 void
lpfc_cleanup_rcv_buffers(struct lpfc_vport * vport)17579 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17580 {
17581 struct lpfc_dmabuf *h_buf, *hnext;
17582 struct lpfc_dmabuf *d_buf, *dnext;
17583 struct hbq_dmabuf *dmabuf = NULL;
17584
17585 /* start with the oldest sequence on the rcv list */
17586 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17587 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17588 list_del_init(&dmabuf->hbuf.list);
17589 list_for_each_entry_safe(d_buf, dnext,
17590 &dmabuf->dbuf.list, list) {
17591 list_del_init(&d_buf->list);
17592 lpfc_in_buf_free(vport->phba, d_buf);
17593 }
17594 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17595 }
17596 }
17597
17598 /**
17599 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17600 * @vport: The vport that the received sequences were sent to.
17601 *
17602 * This function determines whether any received sequences have timed out by
17603 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17604 * indicates that there is at least one timed out sequence this routine will
17605 * go through the received sequences one at a time from most inactive to most
17606 * active to determine which ones need to be cleaned up. Once it has determined
17607 * that a sequence needs to be cleaned up it will simply free up the resources
17608 * without sending an abort.
17609 **/
17610 void
lpfc_rcv_seq_check_edtov(struct lpfc_vport * vport)17611 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17612 {
17613 struct lpfc_dmabuf *h_buf, *hnext;
17614 struct lpfc_dmabuf *d_buf, *dnext;
17615 struct hbq_dmabuf *dmabuf = NULL;
17616 unsigned long timeout;
17617 int abort_count = 0;
17618
17619 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17620 vport->rcv_buffer_time_stamp);
17621 if (list_empty(&vport->rcv_buffer_list) ||
17622 time_before(jiffies, timeout))
17623 return;
17624 /* start with the oldest sequence on the rcv list */
17625 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17626 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17627 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17628 dmabuf->time_stamp);
17629 if (time_before(jiffies, timeout))
17630 break;
17631 abort_count++;
17632 list_del_init(&dmabuf->hbuf.list);
17633 list_for_each_entry_safe(d_buf, dnext,
17634 &dmabuf->dbuf.list, list) {
17635 list_del_init(&d_buf->list);
17636 lpfc_in_buf_free(vport->phba, d_buf);
17637 }
17638 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17639 }
17640 if (abort_count)
17641 lpfc_update_rcv_time_stamp(vport);
17642 }
17643
17644 /**
17645 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17646 * @vport: pointer to a vitural port
17647 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17648 *
17649 * This function searches through the existing incomplete sequences that have
17650 * been sent to this @vport. If the frame matches one of the incomplete
17651 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17652 * make up that sequence. If no sequence is found that matches this frame then
17653 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
17654 * This function returns a pointer to the first dmabuf in the sequence list that
17655 * the frame was linked to.
17656 **/
17657 static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)17658 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17659 {
17660 struct fc_frame_header *new_hdr;
17661 struct fc_frame_header *temp_hdr;
17662 struct lpfc_dmabuf *d_buf;
17663 struct lpfc_dmabuf *h_buf;
17664 struct hbq_dmabuf *seq_dmabuf = NULL;
17665 struct hbq_dmabuf *temp_dmabuf = NULL;
17666 uint8_t found = 0;
17667
17668 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17669 dmabuf->time_stamp = jiffies;
17670 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17671
17672 /* Use the hdr_buf to find the sequence that this frame belongs to */
17673 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17674 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17675 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17676 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17677 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17678 continue;
17679 /* found a pending sequence that matches this frame */
17680 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17681 break;
17682 }
17683 if (!seq_dmabuf) {
17684 /*
17685 * This indicates first frame received for this sequence.
17686 * Queue the buffer on the vport's rcv_buffer_list.
17687 */
17688 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17689 lpfc_update_rcv_time_stamp(vport);
17690 return dmabuf;
17691 }
17692 temp_hdr = seq_dmabuf->hbuf.virt;
17693 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17694 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17695 list_del_init(&seq_dmabuf->hbuf.list);
17696 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17697 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17698 lpfc_update_rcv_time_stamp(vport);
17699 return dmabuf;
17700 }
17701 /* move this sequence to the tail to indicate a young sequence */
17702 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17703 seq_dmabuf->time_stamp = jiffies;
17704 lpfc_update_rcv_time_stamp(vport);
17705 if (list_empty(&seq_dmabuf->dbuf.list)) {
17706 temp_hdr = dmabuf->hbuf.virt;
17707 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17708 return seq_dmabuf;
17709 }
17710 /* find the correct place in the sequence to insert this frame */
17711 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17712 while (!found) {
17713 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17714 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17715 /*
17716 * If the frame's sequence count is greater than the frame on
17717 * the list then insert the frame right after this frame
17718 */
17719 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17720 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17721 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17722 found = 1;
17723 break;
17724 }
17725
17726 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17727 break;
17728 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17729 }
17730
17731 if (found)
17732 return seq_dmabuf;
17733 return NULL;
17734 }
17735
17736 /**
17737 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17738 * @vport: pointer to a vitural port
17739 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17740 *
17741 * This function tries to abort from the partially assembed sequence, described
17742 * by the information from basic abbort @dmabuf. It checks to see whether such
17743 * partially assembled sequence held by the driver. If so, it shall free up all
17744 * the frames from the partially assembled sequence.
17745 *
17746 * Return
17747 * true -- if there is matching partially assembled sequence present and all
17748 * the frames freed with the sequence;
17749 * false -- if there is no matching partially assembled sequence present so
17750 * nothing got aborted in the lower layer driver
17751 **/
17752 static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)17753 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17754 struct hbq_dmabuf *dmabuf)
17755 {
17756 struct fc_frame_header *new_hdr;
17757 struct fc_frame_header *temp_hdr;
17758 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17759 struct hbq_dmabuf *seq_dmabuf = NULL;
17760
17761 /* Use the hdr_buf to find the sequence that matches this frame */
17762 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17763 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17764 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17765 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17766 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17767 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17768 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17769 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17770 continue;
17771 /* found a pending sequence that matches this frame */
17772 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17773 break;
17774 }
17775
17776 /* Free up all the frames from the partially assembled sequence */
17777 if (seq_dmabuf) {
17778 list_for_each_entry_safe(d_buf, n_buf,
17779 &seq_dmabuf->dbuf.list, list) {
17780 list_del_init(&d_buf->list);
17781 lpfc_in_buf_free(vport->phba, d_buf);
17782 }
17783 return true;
17784 }
17785 return false;
17786 }
17787
17788 /**
17789 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17790 * @vport: pointer to a vitural port
17791 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17792 *
17793 * This function tries to abort from the assembed sequence from upper level
17794 * protocol, described by the information from basic abbort @dmabuf. It
17795 * checks to see whether such pending context exists at upper level protocol.
17796 * If so, it shall clean up the pending context.
17797 *
17798 * Return
17799 * true -- if there is matching pending context of the sequence cleaned
17800 * at ulp;
17801 * false -- if there is no matching pending context of the sequence present
17802 * at ulp.
17803 **/
17804 static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)17805 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17806 {
17807 struct lpfc_hba *phba = vport->phba;
17808 int handled;
17809
17810 /* Accepting abort at ulp with SLI4 only */
17811 if (phba->sli_rev < LPFC_SLI_REV4)
17812 return false;
17813
17814 /* Register all caring upper level protocols to attend abort */
17815 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17816 if (handled)
17817 return true;
17818
17819 return false;
17820 }
17821
17822 /**
17823 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17824 * @phba: Pointer to HBA context object.
17825 * @cmd_iocbq: pointer to the command iocbq structure.
17826 * @rsp_iocbq: pointer to the response iocbq structure.
17827 *
17828 * This function handles the sequence abort response iocb command complete
17829 * event. It properly releases the memory allocated to the sequence abort
17830 * accept iocb.
17831 **/
17832 static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmd_iocbq,struct lpfc_iocbq * rsp_iocbq)17833 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17834 struct lpfc_iocbq *cmd_iocbq,
17835 struct lpfc_iocbq *rsp_iocbq)
17836 {
17837 struct lpfc_nodelist *ndlp;
17838
17839 if (cmd_iocbq) {
17840 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17841 lpfc_nlp_put(ndlp);
17842 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17843 }
17844
17845 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17846 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17848 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17849 rsp_iocbq->iocb.ulpStatus,
17850 rsp_iocbq->iocb.un.ulpWord[4]);
17851 }
17852
17853 /**
17854 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
17855 * @phba: Pointer to HBA context object.
17856 * @xri: xri id in transaction.
17857 *
17858 * This function validates the xri maps to the known range of XRIs allocated an
17859 * used by the driver.
17860 **/
17861 uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba * phba,uint16_t xri)17862 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17863 uint16_t xri)
17864 {
17865 uint16_t i;
17866
17867 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17868 if (xri == phba->sli4_hba.xri_ids[i])
17869 return i;
17870 }
17871 return NO_XRI;
17872 }
17873
17874 /**
17875 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17876 * @vport: pointer to a vitural port.
17877 * @fc_hdr: pointer to a FC frame header.
17878 * @aborted: was the partially assembled receive sequence successfully aborted
17879 *
17880 * This function sends a basic response to a previous unsol sequence abort
17881 * event after aborting the sequence handling.
17882 **/
17883 void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport * vport,struct fc_frame_header * fc_hdr,bool aborted)17884 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17885 struct fc_frame_header *fc_hdr, bool aborted)
17886 {
17887 struct lpfc_hba *phba = vport->phba;
17888 struct lpfc_iocbq *ctiocb = NULL;
17889 struct lpfc_nodelist *ndlp;
17890 uint16_t oxid, rxid, xri, lxri;
17891 uint32_t sid, fctl;
17892 IOCB_t *icmd;
17893 int rc;
17894
17895 if (!lpfc_is_link_up(phba))
17896 return;
17897
17898 sid = sli4_sid_from_fc_hdr(fc_hdr);
17899 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17900 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17901
17902 ndlp = lpfc_findnode_did(vport, sid);
17903 if (!ndlp) {
17904 ndlp = lpfc_nlp_init(vport, sid);
17905 if (!ndlp) {
17906 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17907 "1268 Failed to allocate ndlp for "
17908 "oxid:x%x SID:x%x\n", oxid, sid);
17909 return;
17910 }
17911 /* Put ndlp onto pport node list */
17912 lpfc_enqueue_node(vport, ndlp);
17913 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17914 /* re-setup ndlp without removing from node list */
17915 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17916 if (!ndlp) {
17917 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17918 "3275 Failed to active ndlp found "
17919 "for oxid:x%x SID:x%x\n", oxid, sid);
17920 return;
17921 }
17922 }
17923
17924 /* Allocate buffer for rsp iocb */
17925 ctiocb = lpfc_sli_get_iocbq(phba);
17926 if (!ctiocb)
17927 return;
17928
17929 /* Extract the F_CTL field from FC_HDR */
17930 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17931
17932 icmd = &ctiocb->iocb;
17933 icmd->un.xseq64.bdl.bdeSize = 0;
17934 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17935 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17936 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17937 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17938
17939 /* Fill in the rest of iocb fields */
17940 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17941 icmd->ulpBdeCount = 0;
17942 icmd->ulpLe = 1;
17943 icmd->ulpClass = CLASS3;
17944 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17945 ctiocb->context1 = lpfc_nlp_get(ndlp);
17946
17947 ctiocb->vport = phba->pport;
17948 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17949 ctiocb->sli4_lxritag = NO_XRI;
17950 ctiocb->sli4_xritag = NO_XRI;
17951
17952 if (fctl & FC_FC_EX_CTX)
17953 /* Exchange responder sent the abort so we
17954 * own the oxid.
17955 */
17956 xri = oxid;
17957 else
17958 xri = rxid;
17959 lxri = lpfc_sli4_xri_inrange(phba, xri);
17960 if (lxri != NO_XRI)
17961 lpfc_set_rrq_active(phba, ndlp, lxri,
17962 (xri == oxid) ? rxid : oxid, 0);
17963 /* For BA_ABTS from exchange responder, if the logical xri with
17964 * the oxid maps to the FCP XRI range, the port no longer has
17965 * that exchange context, send a BLS_RJT. Override the IOCB for
17966 * a BA_RJT.
17967 */
17968 if ((fctl & FC_FC_EX_CTX) &&
17969 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17970 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17971 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17972 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17973 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17974 }
17975
17976 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17977 * the driver no longer has that exchange, send a BLS_RJT. Override
17978 * the IOCB for a BA_RJT.
17979 */
17980 if (aborted == false) {
17981 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17982 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17983 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17984 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17985 }
17986
17987 if (fctl & FC_FC_EX_CTX) {
17988 /* ABTS sent by responder to CT exchange, construction
17989 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17990 * field and RX_ID from ABTS for RX_ID field.
17991 */
17992 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17993 } else {
17994 /* ABTS sent by initiator to CT exchange, construction
17995 * of BA_ACC will need to allocate a new XRI as for the
17996 * XRI_TAG field.
17997 */
17998 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17999 }
18000 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18001 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18002
18003 /* Xmit CT abts response on exchange <xid> */
18004 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18005 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18006 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18007
18008 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18009 if (rc == IOCB_ERROR) {
18010 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18011 "2925 Failed to issue CT ABTS RSP x%x on "
18012 "xri x%x, Data x%x\n",
18013 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18014 phba->link_state);
18015 lpfc_nlp_put(ndlp);
18016 ctiocb->context1 = NULL;
18017 lpfc_sli_release_iocbq(phba, ctiocb);
18018 }
18019 }
18020
18021 /**
18022 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18023 * @vport: Pointer to the vport on which this sequence was received
18024 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18025 *
18026 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18027 * receive sequence is only partially assembed by the driver, it shall abort
18028 * the partially assembled frames for the sequence. Otherwise, if the
18029 * unsolicited receive sequence has been completely assembled and passed to
18030 * the Upper Layer Protocol (ULP), it then mark the per oxid status for the
18031 * unsolicited sequence has been aborted. After that, it will issue a basic
18032 * accept to accept the abort.
18033 **/
18034 static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)18035 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18036 struct hbq_dmabuf *dmabuf)
18037 {
18038 struct lpfc_hba *phba = vport->phba;
18039 struct fc_frame_header fc_hdr;
18040 uint32_t fctl;
18041 bool aborted;
18042
18043 /* Make a copy of fc_hdr before the dmabuf being released */
18044 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18045 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18046
18047 if (fctl & FC_FC_EX_CTX) {
18048 /* ABTS by responder to exchange, no cleanup needed */
18049 aborted = true;
18050 } else {
18051 /* ABTS by initiator to exchange, need to do cleanup */
18052 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18053 if (aborted == false)
18054 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18055 }
18056 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18057
18058 if (phba->nvmet_support) {
18059 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18060 return;
18061 }
18062
18063 /* Respond with BA_ACC or BA_RJT accordingly */
18064 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18065 }
18066
18067 /**
18068 * lpfc_seq_complete - Indicates if a sequence is complete
18069 * @dmabuf: pointer to a dmabuf that describes the FC sequence
18070 *
18071 * This function checks the sequence, starting with the frame described by
18072 * @dmabuf, to see if all the frames associated with this sequence are present.
18073 * the frames associated with this sequence are linked to the @dmabuf using the
18074 * dbuf list. This function looks for two major things. 1) That the first frame
18075 * has a sequence count of zero. 2) There is a frame with last frame of sequence
18076 * set. 3) That there are no holes in the sequence count. The function will
18077 * return 1 when the sequence is complete, otherwise it will return 0.
18078 **/
18079 static int
lpfc_seq_complete(struct hbq_dmabuf * dmabuf)18080 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18081 {
18082 struct fc_frame_header *hdr;
18083 struct lpfc_dmabuf *d_buf;
18084 struct hbq_dmabuf *seq_dmabuf;
18085 uint32_t fctl;
18086 int seq_count = 0;
18087
18088 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18089 /* make sure first fame of sequence has a sequence count of zero */
18090 if (hdr->fh_seq_cnt != seq_count)
18091 return 0;
18092 fctl = (hdr->fh_f_ctl[0] << 16 |
18093 hdr->fh_f_ctl[1] << 8 |
18094 hdr->fh_f_ctl[2]);
18095 /* If last frame of sequence we can return success. */
18096 if (fctl & FC_FC_END_SEQ)
18097 return 1;
18098 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18099 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18100 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18101 /* If there is a hole in the sequence count then fail. */
18102 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18103 return 0;
18104 fctl = (hdr->fh_f_ctl[0] << 16 |
18105 hdr->fh_f_ctl[1] << 8 |
18106 hdr->fh_f_ctl[2]);
18107 /* If last frame of sequence we can return success. */
18108 if (fctl & FC_FC_END_SEQ)
18109 return 1;
18110 }
18111 return 0;
18112 }
18113
18114 /**
18115 * lpfc_prep_seq - Prep sequence for ULP processing
18116 * @vport: Pointer to the vport on which this sequence was received
18117 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
18118 *
18119 * This function takes a sequence, described by a list of frames, and creates
18120 * a list of iocbq structures to describe the sequence. This iocbq list will be
18121 * used to issue to the generic unsolicited sequence handler. This routine
18122 * returns a pointer to the first iocbq in the list. If the function is unable
18123 * to allocate an iocbq then it throw out the received frames that were not
18124 * able to be described and return a pointer to the first iocbq. If unable to
18125 * allocate any iocbqs (including the first) this function will return NULL.
18126 **/
18127 static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport * vport,struct hbq_dmabuf * seq_dmabuf)18128 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18129 {
18130 struct hbq_dmabuf *hbq_buf;
18131 struct lpfc_dmabuf *d_buf, *n_buf;
18132 struct lpfc_iocbq *first_iocbq, *iocbq;
18133 struct fc_frame_header *fc_hdr;
18134 uint32_t sid;
18135 uint32_t len, tot_len;
18136 struct ulp_bde64 *pbde;
18137
18138 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18139 /* remove from receive buffer list */
18140 list_del_init(&seq_dmabuf->hbuf.list);
18141 lpfc_update_rcv_time_stamp(vport);
18142 /* get the Remote Port's SID */
18143 sid = sli4_sid_from_fc_hdr(fc_hdr);
18144 tot_len = 0;
18145 /* Get an iocbq struct to fill in. */
18146 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18147 if (first_iocbq) {
18148 /* Initialize the first IOCB. */
18149 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18150 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18151 first_iocbq->vport = vport;
18152
18153 /* Check FC Header to see what TYPE of frame we are rcv'ing */
18154 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18155 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18156 first_iocbq->iocb.un.rcvels.parmRo =
18157 sli4_did_from_fc_hdr(fc_hdr);
18158 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18159 } else
18160 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18161 first_iocbq->iocb.ulpContext = NO_XRI;
18162 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18163 be16_to_cpu(fc_hdr->fh_ox_id);
18164 /* iocbq is prepped for internal consumption. Physical vpi. */
18165 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18166 vport->phba->vpi_ids[vport->vpi];
18167 /* put the first buffer into the first IOCBq */
18168 tot_len = bf_get(lpfc_rcqe_length,
18169 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18170
18171 first_iocbq->context2 = &seq_dmabuf->dbuf;
18172 first_iocbq->context3 = NULL;
18173 first_iocbq->iocb.ulpBdeCount = 1;
18174 if (tot_len > LPFC_DATA_BUF_SIZE)
18175 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18176 LPFC_DATA_BUF_SIZE;
18177 else
18178 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18179
18180 first_iocbq->iocb.un.rcvels.remoteID = sid;
18181
18182 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18183 }
18184 iocbq = first_iocbq;
18185 /*
18186 * Each IOCBq can have two Buffers assigned, so go through the list
18187 * of buffers for this sequence and save two buffers in each IOCBq
18188 */
18189 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18190 if (!iocbq) {
18191 lpfc_in_buf_free(vport->phba, d_buf);
18192 continue;
18193 }
18194 if (!iocbq->context3) {
18195 iocbq->context3 = d_buf;
18196 iocbq->iocb.ulpBdeCount++;
18197 /* We need to get the size out of the right CQE */
18198 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18199 len = bf_get(lpfc_rcqe_length,
18200 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18201 pbde = (struct ulp_bde64 *)
18202 &iocbq->iocb.unsli3.sli3Words[4];
18203 if (len > LPFC_DATA_BUF_SIZE)
18204 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18205 else
18206 pbde->tus.f.bdeSize = len;
18207
18208 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18209 tot_len += len;
18210 } else {
18211 iocbq = lpfc_sli_get_iocbq(vport->phba);
18212 if (!iocbq) {
18213 if (first_iocbq) {
18214 first_iocbq->iocb.ulpStatus =
18215 IOSTAT_FCP_RSP_ERROR;
18216 first_iocbq->iocb.un.ulpWord[4] =
18217 IOERR_NO_RESOURCES;
18218 }
18219 lpfc_in_buf_free(vport->phba, d_buf);
18220 continue;
18221 }
18222 /* We need to get the size out of the right CQE */
18223 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18224 len = bf_get(lpfc_rcqe_length,
18225 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18226 iocbq->context2 = d_buf;
18227 iocbq->context3 = NULL;
18228 iocbq->iocb.ulpBdeCount = 1;
18229 if (len > LPFC_DATA_BUF_SIZE)
18230 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18231 LPFC_DATA_BUF_SIZE;
18232 else
18233 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18234
18235 tot_len += len;
18236 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18237
18238 iocbq->iocb.un.rcvels.remoteID = sid;
18239 list_add_tail(&iocbq->list, &first_iocbq->list);
18240 }
18241 }
18242 /* Free the sequence's header buffer */
18243 if (!first_iocbq)
18244 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18245
18246 return first_iocbq;
18247 }
18248
18249 static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport * vport,struct hbq_dmabuf * seq_dmabuf)18250 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18251 struct hbq_dmabuf *seq_dmabuf)
18252 {
18253 struct fc_frame_header *fc_hdr;
18254 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18255 struct lpfc_hba *phba = vport->phba;
18256
18257 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18258 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18259 if (!iocbq) {
18260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18261 "2707 Ring %d handler: Failed to allocate "
18262 "iocb Rctl x%x Type x%x received\n",
18263 LPFC_ELS_RING,
18264 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18265 return;
18266 }
18267 if (!lpfc_complete_unsol_iocb(phba,
18268 phba->sli4_hba.els_wq->pring,
18269 iocbq, fc_hdr->fh_r_ctl,
18270 fc_hdr->fh_type))
18271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18272 "2540 Ring %d handler: unexpected Rctl "
18273 "x%x Type x%x received\n",
18274 LPFC_ELS_RING,
18275 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18276
18277 /* Free iocb created in lpfc_prep_seq */
18278 list_for_each_entry_safe(curr_iocb, next_iocb,
18279 &iocbq->list, list) {
18280 list_del_init(&curr_iocb->list);
18281 lpfc_sli_release_iocbq(phba, curr_iocb);
18282 }
18283 lpfc_sli_release_iocbq(phba, iocbq);
18284 }
18285
18286 static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocb,struct lpfc_iocbq * rspiocb)18287 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18288 struct lpfc_iocbq *rspiocb)
18289 {
18290 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18291
18292 if (pcmd && pcmd->virt)
18293 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18294 kfree(pcmd);
18295 lpfc_sli_release_iocbq(phba, cmdiocb);
18296 lpfc_drain_txq(phba);
18297 }
18298
18299 static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport * vport,struct hbq_dmabuf * dmabuf)18300 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18301 struct hbq_dmabuf *dmabuf)
18302 {
18303 struct fc_frame_header *fc_hdr;
18304 struct lpfc_hba *phba = vport->phba;
18305 struct lpfc_iocbq *iocbq = NULL;
18306 union lpfc_wqe *wqe;
18307 struct lpfc_dmabuf *pcmd = NULL;
18308 uint32_t frame_len;
18309 int rc;
18310 unsigned long iflags;
18311
18312 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18313 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18314
18315 /* Send the received frame back */
18316 iocbq = lpfc_sli_get_iocbq(phba);
18317 if (!iocbq) {
18318 /* Queue cq event and wakeup worker thread to process it */
18319 spin_lock_irqsave(&phba->hbalock, iflags);
18320 list_add_tail(&dmabuf->cq_event.list,
18321 &phba->sli4_hba.sp_queue_event);
18322 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18323 spin_unlock_irqrestore(&phba->hbalock, iflags);
18324 lpfc_worker_wake_up(phba);
18325 return;
18326 }
18327
18328 /* Allocate buffer for command payload */
18329 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18330 if (pcmd)
18331 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18332 &pcmd->phys);
18333 if (!pcmd || !pcmd->virt)
18334 goto exit;
18335
18336 INIT_LIST_HEAD(&pcmd->list);
18337
18338 /* copyin the payload */
18339 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18340
18341 /* fill in BDE's for command */
18342 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18343 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18344 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18345 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18346
18347 iocbq->context2 = pcmd;
18348 iocbq->vport = vport;
18349 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18350 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18351
18352 /*
18353 * Setup rest of the iocb as though it were a WQE
18354 * Build the SEND_FRAME WQE
18355 */
18356 wqe = (union lpfc_wqe *)&iocbq->iocb;
18357
18358 wqe->send_frame.frame_len = frame_len;
18359 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18360 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18361 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18362 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18363 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18364 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18365
18366 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18367 iocbq->iocb.ulpLe = 1;
18368 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18369 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18370 if (rc == IOCB_ERROR)
18371 goto exit;
18372
18373 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18374 return;
18375
18376 exit:
18377 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18378 "2023 Unable to process MDS loopback frame\n");
18379 if (pcmd && pcmd->virt)
18380 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18381 kfree(pcmd);
18382 if (iocbq)
18383 lpfc_sli_release_iocbq(phba, iocbq);
18384 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18385 }
18386
18387 /**
18388 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18389 * @phba: Pointer to HBA context object.
18390 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
18391 *
18392 * This function is called with no lock held. This function processes all
18393 * the received buffers and gives it to upper layers when a received buffer
18394 * indicates that it is the final frame in the sequence. The interrupt
18395 * service routine processes received buffers at interrupt contexts.
18396 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18397 * appropriate receive function when the final frame in a sequence is received.
18398 **/
18399 void
lpfc_sli4_handle_received_buffer(struct lpfc_hba * phba,struct hbq_dmabuf * dmabuf)18400 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18401 struct hbq_dmabuf *dmabuf)
18402 {
18403 struct hbq_dmabuf *seq_dmabuf;
18404 struct fc_frame_header *fc_hdr;
18405 struct lpfc_vport *vport;
18406 uint32_t fcfi;
18407 uint32_t did;
18408
18409 /* Process each received buffer */
18410 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18411
18412 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18413 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18414 vport = phba->pport;
18415 /* Handle MDS Loopback frames */
18416 if (!(phba->pport->load_flag & FC_UNLOADING))
18417 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18418 else
18419 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18420 return;
18421 }
18422
18423 /* check to see if this a valid type of frame */
18424 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18425 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18426 return;
18427 }
18428
18429 if ((bf_get(lpfc_cqe_code,
18430 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18431 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18432 &dmabuf->cq_event.cqe.rcqe_cmpl);
18433 else
18434 fcfi = bf_get(lpfc_rcqe_fcf_id,
18435 &dmabuf->cq_event.cqe.rcqe_cmpl);
18436
18437 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18438 vport = phba->pport;
18439 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18440 "2023 MDS Loopback %d bytes\n",
18441 bf_get(lpfc_rcqe_length,
18442 &dmabuf->cq_event.cqe.rcqe_cmpl));
18443 /* Handle MDS Loopback frames */
18444 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18445 return;
18446 }
18447
18448 /* d_id this frame is directed to */
18449 did = sli4_did_from_fc_hdr(fc_hdr);
18450
18451 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18452 if (!vport) {
18453 /* throw out the frame */
18454 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18455 return;
18456 }
18457
18458 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18459 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18460 (did != Fabric_DID)) {
18461 /*
18462 * Throw out the frame if we are not pt2pt.
18463 * The pt2pt protocol allows for discovery frames
18464 * to be received without a registered VPI.
18465 */
18466 if (!(vport->fc_flag & FC_PT2PT) ||
18467 (phba->link_state == LPFC_HBA_READY)) {
18468 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18469 return;
18470 }
18471 }
18472
18473 /* Handle the basic abort sequence (BA_ABTS) event */
18474 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18475 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18476 return;
18477 }
18478
18479 /* Link this frame */
18480 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18481 if (!seq_dmabuf) {
18482 /* unable to add frame to vport - throw it out */
18483 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18484 return;
18485 }
18486 /* If not last frame in sequence continue processing frames. */
18487 if (!lpfc_seq_complete(seq_dmabuf))
18488 return;
18489
18490 /* Send the complete sequence to the upper layer protocol */
18491 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18492 }
18493
18494 /**
18495 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
18496 * @phba: pointer to lpfc hba data structure.
18497 *
18498 * This routine is invoked to post rpi header templates to the
18499 * HBA consistent with the SLI-4 interface spec. This routine
18500 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18501 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
18502 *
18503 * This routine does not require any locks. It's usage is expected
18504 * to be driver load or reset recovery when the driver is
18505 * sequential.
18506 *
18507 * Return codes
18508 * 0 - successful
18509 * -EIO - The mailbox failed to complete successfully.
18510 * When this error occurs, the driver is not guaranteed
18511 * to have any rpi regions posted to the device and
18512 * must either attempt to repost the regions or take a
18513 * fatal error.
18514 **/
18515 int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba * phba)18516 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18517 {
18518 struct lpfc_rpi_hdr *rpi_page;
18519 uint32_t rc = 0;
18520 uint16_t lrpi = 0;
18521
18522 /* SLI4 ports that support extents do not require RPI headers. */
18523 if (!phba->sli4_hba.rpi_hdrs_in_use)
18524 goto exit;
18525 if (phba->sli4_hba.extents_in_use)
18526 return -EIO;
18527
18528 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18529 /*
18530 * Assign the rpi headers a physical rpi only if the driver
18531 * has not initialized those resources. A port reset only
18532 * needs the headers posted.
18533 */
18534 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18535 LPFC_RPI_RSRC_RDY)
18536 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18537
18538 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18539 if (rc != MBX_SUCCESS) {
18540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541 "2008 Error %d posting all rpi "
18542 "headers\n", rc);
18543 rc = -EIO;
18544 break;
18545 }
18546 }
18547
18548 exit:
18549 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18550 LPFC_RPI_RSRC_RDY);
18551 return rc;
18552 }
18553
18554 /**
18555 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18556 * @phba: pointer to lpfc hba data structure.
18557 * @rpi_page: pointer to the rpi memory region.
18558 *
18559 * This routine is invoked to post a single rpi header to the
18560 * HBA consistent with the SLI-4 interface spec. This memory region
18561 * maps up to 64 rpi context regions.
18562 *
18563 * Return codes
18564 * 0 - successful
18565 * -ENOMEM - No available memory
18566 * -EIO - The mailbox failed to complete successfully.
18567 **/
18568 int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba * phba,struct lpfc_rpi_hdr * rpi_page)18569 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18570 {
18571 LPFC_MBOXQ_t *mboxq;
18572 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18573 uint32_t rc = 0;
18574 uint32_t shdr_status, shdr_add_status;
18575 union lpfc_sli4_cfg_shdr *shdr;
18576
18577 /* SLI4 ports that support extents do not require RPI headers. */
18578 if (!phba->sli4_hba.rpi_hdrs_in_use)
18579 return rc;
18580 if (phba->sli4_hba.extents_in_use)
18581 return -EIO;
18582
18583 /* The port is notified of the header region via a mailbox command. */
18584 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18585 if (!mboxq) {
18586 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18587 "2001 Unable to allocate memory for issuing "
18588 "SLI_CONFIG_SPECIAL mailbox command\n");
18589 return -ENOMEM;
18590 }
18591
18592 /* Post all rpi memory regions to the port. */
18593 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18594 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18595 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18596 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18597 sizeof(struct lpfc_sli4_cfg_mhdr),
18598 LPFC_SLI4_MBX_EMBED);
18599
18600
18601 /* Post the physical rpi to the port for this rpi header. */
18602 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18603 rpi_page->start_rpi);
18604 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18605 hdr_tmpl, rpi_page->page_count);
18606
18607 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18608 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18609 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18610 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18611 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18612 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18613 mempool_free(mboxq, phba->mbox_mem_pool);
18614 if (shdr_status || shdr_add_status || rc) {
18615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18616 "2514 POST_RPI_HDR mailbox failed with "
18617 "status x%x add_status x%x, mbx status x%x\n",
18618 shdr_status, shdr_add_status, rc);
18619 rc = -ENXIO;
18620 } else {
18621 /*
18622 * The next_rpi stores the next logical module-64 rpi value used
18623 * to post physical rpis in subsequent rpi postings.
18624 */
18625 spin_lock_irq(&phba->hbalock);
18626 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18627 spin_unlock_irq(&phba->hbalock);
18628 }
18629 return rc;
18630 }
18631
18632 /**
18633 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18634 * @phba: pointer to lpfc hba data structure.
18635 *
18636 * This routine is invoked to post rpi header templates to the
18637 * HBA consistent with the SLI-4 interface spec. This routine
18638 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
18639 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
18640 *
18641 * Returns
18642 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
18643 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18644 **/
18645 int
lpfc_sli4_alloc_rpi(struct lpfc_hba * phba)18646 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18647 {
18648 unsigned long rpi;
18649 uint16_t max_rpi, rpi_limit;
18650 uint16_t rpi_remaining, lrpi = 0;
18651 struct lpfc_rpi_hdr *rpi_hdr;
18652 unsigned long iflag;
18653
18654 /*
18655 * Fetch the next logical rpi. Because this index is logical,
18656 * the driver starts at 0 each time.
18657 */
18658 spin_lock_irqsave(&phba->hbalock, iflag);
18659 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18660 rpi_limit = phba->sli4_hba.next_rpi;
18661
18662 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18663 if (rpi >= rpi_limit)
18664 rpi = LPFC_RPI_ALLOC_ERROR;
18665 else {
18666 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18667 phba->sli4_hba.max_cfg_param.rpi_used++;
18668 phba->sli4_hba.rpi_count++;
18669 }
18670 lpfc_printf_log(phba, KERN_INFO,
18671 LOG_NODE | LOG_DISCOVERY,
18672 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18673 (int) rpi, max_rpi, rpi_limit);
18674
18675 /*
18676 * Don't try to allocate more rpi header regions if the device limit
18677 * has been exhausted.
18678 */
18679 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18680 (phba->sli4_hba.rpi_count >= max_rpi)) {
18681 spin_unlock_irqrestore(&phba->hbalock, iflag);
18682 return rpi;
18683 }
18684
18685 /*
18686 * RPI header postings are not required for SLI4 ports capable of
18687 * extents.
18688 */
18689 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18690 spin_unlock_irqrestore(&phba->hbalock, iflag);
18691 return rpi;
18692 }
18693
18694 /*
18695 * If the driver is running low on rpi resources, allocate another
18696 * page now. Note that the next_rpi value is used because
18697 * it represents how many are actually in use whereas max_rpi notes
18698 * how many are supported max by the device.
18699 */
18700 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18701 spin_unlock_irqrestore(&phba->hbalock, iflag);
18702 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18703 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18704 if (!rpi_hdr) {
18705 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18706 "2002 Error Could not grow rpi "
18707 "count\n");
18708 } else {
18709 lrpi = rpi_hdr->start_rpi;
18710 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18711 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18712 }
18713 }
18714
18715 return rpi;
18716 }
18717
18718 /**
18719 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18720 * @phba: pointer to lpfc hba data structure.
18721 * @rpi: rpi to free
18722 *
18723 * This routine is invoked to release an rpi to the pool of
18724 * available rpis maintained by the driver.
18725 **/
18726 static void
__lpfc_sli4_free_rpi(struct lpfc_hba * phba,int rpi)18727 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18728 {
18729 /*
18730 * if the rpi value indicates a prior unreg has already
18731 * been done, skip the unreg.
18732 */
18733 if (rpi == LPFC_RPI_ALLOC_ERROR)
18734 return;
18735
18736 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18737 phba->sli4_hba.rpi_count--;
18738 phba->sli4_hba.max_cfg_param.rpi_used--;
18739 } else {
18740 lpfc_printf_log(phba, KERN_INFO,
18741 LOG_NODE | LOG_DISCOVERY,
18742 "2016 rpi %x not inuse\n",
18743 rpi);
18744 }
18745 }
18746
18747 /**
18748 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18749 * @phba: pointer to lpfc hba data structure.
18750 * @rpi: rpi to free
18751 *
18752 * This routine is invoked to release an rpi to the pool of
18753 * available rpis maintained by the driver.
18754 **/
18755 void
lpfc_sli4_free_rpi(struct lpfc_hba * phba,int rpi)18756 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18757 {
18758 spin_lock_irq(&phba->hbalock);
18759 __lpfc_sli4_free_rpi(phba, rpi);
18760 spin_unlock_irq(&phba->hbalock);
18761 }
18762
18763 /**
18764 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18765 * @phba: pointer to lpfc hba data structure.
18766 *
18767 * This routine is invoked to remove the memory region that
18768 * provided rpi via a bitmask.
18769 **/
18770 void
lpfc_sli4_remove_rpis(struct lpfc_hba * phba)18771 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18772 {
18773 kfree(phba->sli4_hba.rpi_bmask);
18774 kfree(phba->sli4_hba.rpi_ids);
18775 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18776 }
18777
18778 /**
18779 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
18780 * @ndlp: pointer to lpfc nodelist data structure.
18781 * @cmpl: completion call-back.
18782 * @arg: data to load as MBox 'caller buffer information'
18783 *
18784 * This routine is invoked to remove the memory region that
18785 * provided rpi via a bitmask.
18786 **/
18787 int
lpfc_sli4_resume_rpi(struct lpfc_nodelist * ndlp,void (* cmpl)(struct lpfc_hba *,LPFC_MBOXQ_t *),void * arg)18788 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18789 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18790 {
18791 LPFC_MBOXQ_t *mboxq;
18792 struct lpfc_hba *phba = ndlp->phba;
18793 int rc;
18794
18795 /* The port is notified of the header region via a mailbox command. */
18796 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18797 if (!mboxq)
18798 return -ENOMEM;
18799
18800 /* Post all rpi memory regions to the port. */
18801 lpfc_resume_rpi(mboxq, ndlp);
18802 if (cmpl) {
18803 mboxq->mbox_cmpl = cmpl;
18804 mboxq->ctx_buf = arg;
18805 mboxq->ctx_ndlp = ndlp;
18806 } else
18807 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18808 mboxq->vport = ndlp->vport;
18809 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18810 if (rc == MBX_NOT_FINISHED) {
18811 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18812 "2010 Resume RPI Mailbox failed "
18813 "status %d, mbxStatus x%x\n", rc,
18814 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18815 mempool_free(mboxq, phba->mbox_mem_pool);
18816 return -EIO;
18817 }
18818 return 0;
18819 }
18820
18821 /**
18822 * lpfc_sli4_init_vpi - Initialize a vpi with the port
18823 * @vport: Pointer to the vport for which the vpi is being initialized
18824 *
18825 * This routine is invoked to activate a vpi with the port.
18826 *
18827 * Returns:
18828 * 0 success
18829 * -Evalue otherwise
18830 **/
18831 int
lpfc_sli4_init_vpi(struct lpfc_vport * vport)18832 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18833 {
18834 LPFC_MBOXQ_t *mboxq;
18835 int rc = 0;
18836 int retval = MBX_SUCCESS;
18837 uint32_t mbox_tmo;
18838 struct lpfc_hba *phba = vport->phba;
18839 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18840 if (!mboxq)
18841 return -ENOMEM;
18842 lpfc_init_vpi(phba, mboxq, vport->vpi);
18843 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18844 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18845 if (rc != MBX_SUCCESS) {
18846 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18847 "2022 INIT VPI Mailbox failed "
18848 "status %d, mbxStatus x%x\n", rc,
18849 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18850 retval = -EIO;
18851 }
18852 if (rc != MBX_TIMEOUT)
18853 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18854
18855 return retval;
18856 }
18857
18858 /**
18859 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
18860 * @phba: pointer to lpfc hba data structure.
18861 * @mboxq: Pointer to mailbox object.
18862 *
18863 * This routine is invoked to manually add a single FCF record. The caller
18864 * must pass a completely initialized FCF_Record. This routine takes
18865 * care of the nonembedded mailbox operations.
18866 **/
18867 static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba * phba,LPFC_MBOXQ_t * mboxq)18868 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18869 {
18870 void *virt_addr;
18871 union lpfc_sli4_cfg_shdr *shdr;
18872 uint32_t shdr_status, shdr_add_status;
18873
18874 virt_addr = mboxq->sge_array->addr[0];
18875 /* The IOCTL status is embedded in the mailbox subheader. */
18876 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18877 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18878 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18879
18880 if ((shdr_status || shdr_add_status) &&
18881 (shdr_status != STATUS_FCF_IN_USE))
18882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18883 "2558 ADD_FCF_RECORD mailbox failed with "
18884 "status x%x add_status x%x\n",
18885 shdr_status, shdr_add_status);
18886
18887 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18888 }
18889
18890 /**
18891 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
18892 * @phba: pointer to lpfc hba data structure.
18893 * @fcf_record: pointer to the initialized fcf record to add.
18894 *
18895 * This routine is invoked to manually add a single FCF record. The caller
18896 * must pass a completely initialized FCF_Record. This routine takes
18897 * care of the nonembedded mailbox operations.
18898 **/
18899 int
lpfc_sli4_add_fcf_record(struct lpfc_hba * phba,struct fcf_record * fcf_record)18900 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18901 {
18902 int rc = 0;
18903 LPFC_MBOXQ_t *mboxq;
18904 uint8_t *bytep;
18905 void *virt_addr;
18906 struct lpfc_mbx_sge sge;
18907 uint32_t alloc_len, req_len;
18908 uint32_t fcfindex;
18909
18910 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18911 if (!mboxq) {
18912 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18913 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18914 return -ENOMEM;
18915 }
18916
18917 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18918 sizeof(uint32_t);
18919
18920 /* Allocate DMA memory and set up the non-embedded mailbox command */
18921 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18922 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18923 req_len, LPFC_SLI4_MBX_NEMBED);
18924 if (alloc_len < req_len) {
18925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18926 "2523 Allocated DMA memory size (x%x) is "
18927 "less than the requested DMA memory "
18928 "size (x%x)\n", alloc_len, req_len);
18929 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18930 return -ENOMEM;
18931 }
18932
18933 /*
18934 * Get the first SGE entry from the non-embedded DMA memory. This
18935 * routine only uses a single SGE.
18936 */
18937 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18938 virt_addr = mboxq->sge_array->addr[0];
18939 /*
18940 * Configure the FCF record for FCFI 0. This is the driver's
18941 * hardcoded default and gets used in nonFIP mode.
18942 */
18943 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18944 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18945 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18946
18947 /*
18948 * Copy the fcf_index and the FCF Record Data. The data starts after
18949 * the FCoE header plus word10. The data copy needs to be endian
18950 * correct.
18951 */
18952 bytep += sizeof(uint32_t);
18953 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18954 mboxq->vport = phba->pport;
18955 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18956 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18957 if (rc == MBX_NOT_FINISHED) {
18958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18959 "2515 ADD_FCF_RECORD mailbox failed with "
18960 "status 0x%x\n", rc);
18961 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18962 rc = -EIO;
18963 } else
18964 rc = 0;
18965
18966 return rc;
18967 }
18968
18969 /**
18970 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
18971 * @phba: pointer to lpfc hba data structure.
18972 * @fcf_record: pointer to the fcf record to write the default data.
18973 * @fcf_index: FCF table entry index.
18974 *
18975 * This routine is invoked to build the driver's default FCF record. The
18976 * values used are hardcoded. This routine handles memory initialization.
18977 *
18978 **/
18979 void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba * phba,struct fcf_record * fcf_record,uint16_t fcf_index)18980 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18981 struct fcf_record *fcf_record,
18982 uint16_t fcf_index)
18983 {
18984 memset(fcf_record, 0, sizeof(struct fcf_record));
18985 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18986 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18987 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18988 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18989 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18990 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18991 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18992 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18993 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18994 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18995 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18996 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18997 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18998 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18999 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19000 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19001 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19002 /* Set the VLAN bit map */
19003 if (phba->valid_vlan) {
19004 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19005 = 1 << (phba->vlan_id % 8);
19006 }
19007 }
19008
19009 /**
19010 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19011 * @phba: pointer to lpfc hba data structure.
19012 * @fcf_index: FCF table entry offset.
19013 *
19014 * This routine is invoked to scan the entire FCF table by reading FCF
19015 * record and processing it one at a time starting from the @fcf_index
19016 * for initial FCF discovery or fast FCF failover rediscovery.
19017 *
19018 * Return 0 if the mailbox command is submitted successfully, none 0
19019 * otherwise.
19020 **/
19021 int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba * phba,uint16_t fcf_index)19022 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19023 {
19024 int rc = 0, error;
19025 LPFC_MBOXQ_t *mboxq;
19026
19027 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19028 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19029 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19030 if (!mboxq) {
19031 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19032 "2000 Failed to allocate mbox for "
19033 "READ_FCF cmd\n");
19034 error = -ENOMEM;
19035 goto fail_fcf_scan;
19036 }
19037 /* Construct the read FCF record mailbox command */
19038 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19039 if (rc) {
19040 error = -EINVAL;
19041 goto fail_fcf_scan;
19042 }
19043 /* Issue the mailbox command asynchronously */
19044 mboxq->vport = phba->pport;
19045 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19046
19047 spin_lock_irq(&phba->hbalock);
19048 phba->hba_flag |= FCF_TS_INPROG;
19049 spin_unlock_irq(&phba->hbalock);
19050
19051 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19052 if (rc == MBX_NOT_FINISHED)
19053 error = -EIO;
19054 else {
19055 /* Reset eligible FCF count for new scan */
19056 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19057 phba->fcf.eligible_fcf_cnt = 0;
19058 error = 0;
19059 }
19060 fail_fcf_scan:
19061 if (error) {
19062 if (mboxq)
19063 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19064 /* FCF scan failed, clear FCF_TS_INPROG flag */
19065 spin_lock_irq(&phba->hbalock);
19066 phba->hba_flag &= ~FCF_TS_INPROG;
19067 spin_unlock_irq(&phba->hbalock);
19068 }
19069 return error;
19070 }
19071
19072 /**
19073 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
19074 * @phba: pointer to lpfc hba data structure.
19075 * @fcf_index: FCF table entry offset.
19076 *
19077 * This routine is invoked to read an FCF record indicated by @fcf_index
19078 * and to use it for FLOGI roundrobin FCF failover.
19079 *
19080 * Return 0 if the mailbox command is submitted successfully, none 0
19081 * otherwise.
19082 **/
19083 int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba * phba,uint16_t fcf_index)19084 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19085 {
19086 int rc = 0, error;
19087 LPFC_MBOXQ_t *mboxq;
19088
19089 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19090 if (!mboxq) {
19091 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19092 "2763 Failed to allocate mbox for "
19093 "READ_FCF cmd\n");
19094 error = -ENOMEM;
19095 goto fail_fcf_read;
19096 }
19097 /* Construct the read FCF record mailbox command */
19098 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19099 if (rc) {
19100 error = -EINVAL;
19101 goto fail_fcf_read;
19102 }
19103 /* Issue the mailbox command asynchronously */
19104 mboxq->vport = phba->pport;
19105 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19106 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19107 if (rc == MBX_NOT_FINISHED)
19108 error = -EIO;
19109 else
19110 error = 0;
19111
19112 fail_fcf_read:
19113 if (error && mboxq)
19114 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19115 return error;
19116 }
19117
19118 /**
19119 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
19120 * @phba: pointer to lpfc hba data structure.
19121 * @fcf_index: FCF table entry offset.
19122 *
19123 * This routine is invoked to read an FCF record indicated by @fcf_index to
19124 * determine whether it's eligible for FLOGI roundrobin failover list.
19125 *
19126 * Return 0 if the mailbox command is submitted successfully, none 0
19127 * otherwise.
19128 **/
19129 int
lpfc_sli4_read_fcf_rec(struct lpfc_hba * phba,uint16_t fcf_index)19130 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19131 {
19132 int rc = 0, error;
19133 LPFC_MBOXQ_t *mboxq;
19134
19135 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19136 if (!mboxq) {
19137 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19138 "2758 Failed to allocate mbox for "
19139 "READ_FCF cmd\n");
19140 error = -ENOMEM;
19141 goto fail_fcf_read;
19142 }
19143 /* Construct the read FCF record mailbox command */
19144 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19145 if (rc) {
19146 error = -EINVAL;
19147 goto fail_fcf_read;
19148 }
19149 /* Issue the mailbox command asynchronously */
19150 mboxq->vport = phba->pport;
19151 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19152 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19153 if (rc == MBX_NOT_FINISHED)
19154 error = -EIO;
19155 else
19156 error = 0;
19157
19158 fail_fcf_read:
19159 if (error && mboxq)
19160 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19161 return error;
19162 }
19163
19164 /**
19165 * lpfc_check_next_fcf_pri_level
19166 * @phba: pointer to the lpfc_hba struct for this port.
19167 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
19168 * routine when the rr_bmask is empty. The FCF indecies are put into the
19169 * rr_bmask based on their priority level. Starting from the highest priority
19170 * to the lowest. The most likely FCF candidate will be in the highest
19171 * priority group. When this routine is called it searches the fcf_pri list for
19172 * next lowest priority group and repopulates the rr_bmask with only those
19173 * fcf_indexes.
19174 * returns:
19175 * 1=success 0=failure
19176 **/
19177 static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba * phba)19178 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19179 {
19180 uint16_t next_fcf_pri;
19181 uint16_t last_index;
19182 struct lpfc_fcf_pri *fcf_pri;
19183 int rc;
19184 int ret = 0;
19185
19186 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19187 LPFC_SLI4_FCF_TBL_INDX_MAX);
19188 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19189 "3060 Last IDX %d\n", last_index);
19190
19191 /* Verify the priority list has 2 or more entries */
19192 spin_lock_irq(&phba->hbalock);
19193 if (list_empty(&phba->fcf.fcf_pri_list) ||
19194 list_is_singular(&phba->fcf.fcf_pri_list)) {
19195 spin_unlock_irq(&phba->hbalock);
19196 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19197 "3061 Last IDX %d\n", last_index);
19198 return 0; /* Empty rr list */
19199 }
19200 spin_unlock_irq(&phba->hbalock);
19201
19202 next_fcf_pri = 0;
19203 /*
19204 * Clear the rr_bmask and set all of the bits that are at this
19205 * priority.
19206 */
19207 memset(phba->fcf.fcf_rr_bmask, 0,
19208 sizeof(*phba->fcf.fcf_rr_bmask));
19209 spin_lock_irq(&phba->hbalock);
19210 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19211 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19212 continue;
19213 /*
19214 * the 1st priority that has not FLOGI failed
19215 * will be the highest.
19216 */
19217 if (!next_fcf_pri)
19218 next_fcf_pri = fcf_pri->fcf_rec.priority;
19219 spin_unlock_irq(&phba->hbalock);
19220 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19221 rc = lpfc_sli4_fcf_rr_index_set(phba,
19222 fcf_pri->fcf_rec.fcf_index);
19223 if (rc)
19224 return 0;
19225 }
19226 spin_lock_irq(&phba->hbalock);
19227 }
19228 /*
19229 * if next_fcf_pri was not set above and the list is not empty then
19230 * we have failed flogis on all of them. So reset flogi failed
19231 * and start at the beginning.
19232 */
19233 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19234 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19235 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19236 /*
19237 * the 1st priority that has not FLOGI failed
19238 * will be the highest.
19239 */
19240 if (!next_fcf_pri)
19241 next_fcf_pri = fcf_pri->fcf_rec.priority;
19242 spin_unlock_irq(&phba->hbalock);
19243 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19244 rc = lpfc_sli4_fcf_rr_index_set(phba,
19245 fcf_pri->fcf_rec.fcf_index);
19246 if (rc)
19247 return 0;
19248 }
19249 spin_lock_irq(&phba->hbalock);
19250 }
19251 } else
19252 ret = 1;
19253 spin_unlock_irq(&phba->hbalock);
19254
19255 return ret;
19256 }
19257 /**
19258 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19259 * @phba: pointer to lpfc hba data structure.
19260 *
19261 * This routine is to get the next eligible FCF record index in a round
19262 * robin fashion. If the next eligible FCF record index equals to the
19263 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19264 * shall be returned, otherwise, the next eligible FCF record's index
19265 * shall be returned.
19266 **/
19267 uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba * phba)19268 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19269 {
19270 uint16_t next_fcf_index;
19271
19272 initial_priority:
19273 /* Search start from next bit of currently registered FCF index */
19274 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19275
19276 next_priority:
19277 /* Determine the next fcf index to check */
19278 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19279 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19280 LPFC_SLI4_FCF_TBL_INDX_MAX,
19281 next_fcf_index);
19282
19283 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19284 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19285 /*
19286 * If we have wrapped then we need to clear the bits that
19287 * have been tested so that we can detect when we should
19288 * change the priority level.
19289 */
19290 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19291 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19292 }
19293
19294
19295 /* Check roundrobin failover list empty condition */
19296 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19297 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19298 /*
19299 * If next fcf index is not found check if there are lower
19300 * Priority level fcf's in the fcf_priority list.
19301 * Set up the rr_bmask with all of the avaiable fcf bits
19302 * at that level and continue the selection process.
19303 */
19304 if (lpfc_check_next_fcf_pri_level(phba))
19305 goto initial_priority;
19306 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19307 "2844 No roundrobin failover FCF available\n");
19308
19309 return LPFC_FCOE_FCF_NEXT_NONE;
19310 }
19311
19312 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19313 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19314 LPFC_FCF_FLOGI_FAILED) {
19315 if (list_is_singular(&phba->fcf.fcf_pri_list))
19316 return LPFC_FCOE_FCF_NEXT_NONE;
19317
19318 goto next_priority;
19319 }
19320
19321 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19322 "2845 Get next roundrobin failover FCF (x%x)\n",
19323 next_fcf_index);
19324
19325 return next_fcf_index;
19326 }
19327
19328 /**
19329 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
19330 * @phba: pointer to lpfc hba data structure.
19331 * @fcf_index: index into the FCF table to 'set'
19332 *
19333 * This routine sets the FCF record index in to the eligible bmask for
19334 * roundrobin failover search. It checks to make sure that the index
19335 * does not go beyond the range of the driver allocated bmask dimension
19336 * before setting the bit.
19337 *
19338 * Returns 0 if the index bit successfully set, otherwise, it returns
19339 * -EINVAL.
19340 **/
19341 int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba * phba,uint16_t fcf_index)19342 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19343 {
19344 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19345 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19346 "2610 FCF (x%x) reached driver's book "
19347 "keeping dimension:x%x\n",
19348 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19349 return -EINVAL;
19350 }
19351 /* Set the eligible FCF record index bmask */
19352 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19353
19354 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19355 "2790 Set FCF (x%x) to roundrobin FCF failover "
19356 "bmask\n", fcf_index);
19357
19358 return 0;
19359 }
19360
19361 /**
19362 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
19363 * @phba: pointer to lpfc hba data structure.
19364 * @fcf_index: index into the FCF table to 'clear'
19365 *
19366 * This routine clears the FCF record index from the eligible bmask for
19367 * roundrobin failover search. It checks to make sure that the index
19368 * does not go beyond the range of the driver allocated bmask dimension
19369 * before clearing the bit.
19370 **/
19371 void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba * phba,uint16_t fcf_index)19372 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19373 {
19374 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19375 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19376 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19377 "2762 FCF (x%x) reached driver's book "
19378 "keeping dimension:x%x\n",
19379 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19380 return;
19381 }
19382 /* Clear the eligible FCF record index bmask */
19383 spin_lock_irq(&phba->hbalock);
19384 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19385 list) {
19386 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19387 list_del_init(&fcf_pri->list);
19388 break;
19389 }
19390 }
19391 spin_unlock_irq(&phba->hbalock);
19392 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19393
19394 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19395 "2791 Clear FCF (x%x) from roundrobin failover "
19396 "bmask\n", fcf_index);
19397 }
19398
19399 /**
19400 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19401 * @phba: pointer to lpfc hba data structure.
19402 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19403 *
19404 * This routine is the completion routine for the rediscover FCF table mailbox
19405 * command. If the mailbox command returned failure, it will try to stop the
19406 * FCF rediscover wait timer.
19407 **/
19408 static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba * phba,LPFC_MBOXQ_t * mbox)19409 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19410 {
19411 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19412 uint32_t shdr_status, shdr_add_status;
19413
19414 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19415
19416 shdr_status = bf_get(lpfc_mbox_hdr_status,
19417 &redisc_fcf->header.cfg_shdr.response);
19418 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19419 &redisc_fcf->header.cfg_shdr.response);
19420 if (shdr_status || shdr_add_status) {
19421 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19422 "2746 Requesting for FCF rediscovery failed "
19423 "status x%x add_status x%x\n",
19424 shdr_status, shdr_add_status);
19425 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19426 spin_lock_irq(&phba->hbalock);
19427 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19428 spin_unlock_irq(&phba->hbalock);
19429 /*
19430 * CVL event triggered FCF rediscover request failed,
19431 * last resort to re-try current registered FCF entry.
19432 */
19433 lpfc_retry_pport_discovery(phba);
19434 } else {
19435 spin_lock_irq(&phba->hbalock);
19436 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19437 spin_unlock_irq(&phba->hbalock);
19438 /*
19439 * DEAD FCF event triggered FCF rediscover request
19440 * failed, last resort to fail over as a link down
19441 * to FCF registration.
19442 */
19443 lpfc_sli4_fcf_dead_failthrough(phba);
19444 }
19445 } else {
19446 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19447 "2775 Start FCF rediscover quiescent timer\n");
19448 /*
19449 * Start FCF rediscovery wait timer for pending FCF
19450 * before rescan FCF record table.
19451 */
19452 lpfc_fcf_redisc_wait_start_timer(phba);
19453 }
19454
19455 mempool_free(mbox, phba->mbox_mem_pool);
19456 }
19457
19458 /**
19459 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
19460 * @phba: pointer to lpfc hba data structure.
19461 *
19462 * This routine is invoked to request for rediscovery of the entire FCF table
19463 * by the port.
19464 **/
19465 int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba * phba)19466 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19467 {
19468 LPFC_MBOXQ_t *mbox;
19469 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19470 int rc, length;
19471
19472 /* Cancel retry delay timers to all vports before FCF rediscover */
19473 lpfc_cancel_all_vport_retry_delay_timer(phba);
19474
19475 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19476 if (!mbox) {
19477 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19478 "2745 Failed to allocate mbox for "
19479 "requesting FCF rediscover.\n");
19480 return -ENOMEM;
19481 }
19482
19483 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19484 sizeof(struct lpfc_sli4_cfg_mhdr));
19485 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19486 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19487 length, LPFC_SLI4_MBX_EMBED);
19488
19489 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19490 /* Set count to 0 for invalidating the entire FCF database */
19491 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19492
19493 /* Issue the mailbox command asynchronously */
19494 mbox->vport = phba->pport;
19495 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19496 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19497
19498 if (rc == MBX_NOT_FINISHED) {
19499 mempool_free(mbox, phba->mbox_mem_pool);
19500 return -EIO;
19501 }
19502 return 0;
19503 }
19504
19505 /**
19506 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
19507 * @phba: pointer to lpfc hba data structure.
19508 *
19509 * This function is the failover routine as a last resort to the FCF DEAD
19510 * event when driver failed to perform fast FCF failover.
19511 **/
19512 void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba * phba)19513 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19514 {
19515 uint32_t link_state;
19516
19517 /*
19518 * Last resort as FCF DEAD event failover will treat this as
19519 * a link down, but save the link state because we don't want
19520 * it to be changed to Link Down unless it is already down.
19521 */
19522 link_state = phba->link_state;
19523 lpfc_linkdown(phba);
19524 phba->link_state = link_state;
19525
19526 /* Unregister FCF if no devices connected to it */
19527 lpfc_unregister_unused_fcf(phba);
19528 }
19529
19530 /**
19531 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19532 * @phba: pointer to lpfc hba data structure.
19533 * @rgn23_data: pointer to configure region 23 data.
19534 *
19535 * This function gets SLI3 port configure region 23 data through memory dump
19536 * mailbox command. When it successfully retrieves data, the size of the data
19537 * will be returned, otherwise, 0 will be returned.
19538 **/
19539 static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba * phba,char * rgn23_data)19540 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19541 {
19542 LPFC_MBOXQ_t *pmb = NULL;
19543 MAILBOX_t *mb;
19544 uint32_t offset = 0;
19545 int i, rc;
19546
19547 if (!rgn23_data)
19548 return 0;
19549
19550 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19551 if (!pmb) {
19552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19553 "2600 failed to allocate mailbox memory\n");
19554 return 0;
19555 }
19556 mb = &pmb->u.mb;
19557
19558 do {
19559 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19560 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19561
19562 if (rc != MBX_SUCCESS) {
19563 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19564 "2601 failed to read config "
19565 "region 23, rc 0x%x Status 0x%x\n",
19566 rc, mb->mbxStatus);
19567 mb->un.varDmp.word_cnt = 0;
19568 }
19569 /*
19570 * dump mem may return a zero when finished or we got a
19571 * mailbox error, either way we are done.
19572 */
19573 if (mb->un.varDmp.word_cnt == 0)
19574 break;
19575
19576 i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19577 if (offset + i > DMP_RGN23_SIZE)
19578 i = DMP_RGN23_SIZE - offset;
19579 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19580 rgn23_data + offset, i);
19581 offset += i;
19582 } while (offset < DMP_RGN23_SIZE);
19583
19584 mempool_free(pmb, phba->mbox_mem_pool);
19585 return offset;
19586 }
19587
19588 /**
19589 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19590 * @phba: pointer to lpfc hba data structure.
19591 * @rgn23_data: pointer to configure region 23 data.
19592 *
19593 * This function gets SLI4 port configure region 23 data through memory dump
19594 * mailbox command. When it successfully retrieves data, the size of the data
19595 * will be returned, otherwise, 0 will be returned.
19596 **/
19597 static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba * phba,char * rgn23_data)19598 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19599 {
19600 LPFC_MBOXQ_t *mboxq = NULL;
19601 struct lpfc_dmabuf *mp = NULL;
19602 struct lpfc_mqe *mqe;
19603 uint32_t data_length = 0;
19604 int rc;
19605
19606 if (!rgn23_data)
19607 return 0;
19608
19609 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19610 if (!mboxq) {
19611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19612 "3105 failed to allocate mailbox memory\n");
19613 return 0;
19614 }
19615
19616 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19617 goto out;
19618 mqe = &mboxq->u.mqe;
19619 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19620 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19621 if (rc)
19622 goto out;
19623 data_length = mqe->un.mb_words[5];
19624 if (data_length == 0)
19625 goto out;
19626 if (data_length > DMP_RGN23_SIZE) {
19627 data_length = 0;
19628 goto out;
19629 }
19630 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19631 out:
19632 mempool_free(mboxq, phba->mbox_mem_pool);
19633 if (mp) {
19634 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19635 kfree(mp);
19636 }
19637 return data_length;
19638 }
19639
19640 /**
19641 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
19642 * @phba: pointer to lpfc hba data structure.
19643 *
19644 * This function read region 23 and parse TLV for port status to
19645 * decide if the user disaled the port. If the TLV indicates the
19646 * port is disabled, the hba_flag is set accordingly.
19647 **/
19648 void
lpfc_sli_read_link_ste(struct lpfc_hba * phba)19649 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19650 {
19651 uint8_t *rgn23_data = NULL;
19652 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19653 uint32_t offset = 0;
19654
19655 /* Get adapter Region 23 data */
19656 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19657 if (!rgn23_data)
19658 goto out;
19659
19660 if (phba->sli_rev < LPFC_SLI_REV4)
19661 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19662 else {
19663 if_type = bf_get(lpfc_sli_intf_if_type,
19664 &phba->sli4_hba.sli_intf);
19665 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19666 goto out;
19667 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19668 }
19669
19670 if (!data_size)
19671 goto out;
19672
19673 /* Check the region signature first */
19674 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19675 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19676 "2619 Config region 23 has bad signature\n");
19677 goto out;
19678 }
19679 offset += 4;
19680
19681 /* Check the data structure version */
19682 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19683 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19684 "2620 Config region 23 has bad version\n");
19685 goto out;
19686 }
19687 offset += 4;
19688
19689 /* Parse TLV entries in the region */
19690 while (offset < data_size) {
19691 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19692 break;
19693 /*
19694 * If the TLV is not driver specific TLV or driver id is
19695 * not linux driver id, skip the record.
19696 */
19697 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19698 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19699 (rgn23_data[offset + 3] != 0)) {
19700 offset += rgn23_data[offset + 1] * 4 + 4;
19701 continue;
19702 }
19703
19704 /* Driver found a driver specific TLV in the config region */
19705 sub_tlv_len = rgn23_data[offset + 1] * 4;
19706 offset += 4;
19707 tlv_offset = 0;
19708
19709 /*
19710 * Search for configured port state sub-TLV.
19711 */
19712 while ((offset < data_size) &&
19713 (tlv_offset < sub_tlv_len)) {
19714 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19715 offset += 4;
19716 tlv_offset += 4;
19717 break;
19718 }
19719 if (rgn23_data[offset] != PORT_STE_TYPE) {
19720 offset += rgn23_data[offset + 1] * 4 + 4;
19721 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19722 continue;
19723 }
19724
19725 /* This HBA contains PORT_STE configured */
19726 if (!rgn23_data[offset + 2])
19727 phba->hba_flag |= LINK_DISABLED;
19728
19729 goto out;
19730 }
19731 }
19732
19733 out:
19734 kfree(rgn23_data);
19735 return;
19736 }
19737
19738 /**
19739 * lpfc_wr_object - write an object to the firmware
19740 * @phba: HBA structure that indicates port to create a queue on.
19741 * @dmabuf_list: list of dmabufs to write to the port.
19742 * @size: the total byte value of the objects to write to the port.
19743 * @offset: the current offset to be used to start the transfer.
19744 *
19745 * This routine will create a wr_object mailbox command to send to the port.
19746 * the mailbox command will be constructed using the dma buffers described in
19747 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
19748 * BDEs that the imbedded mailbox can support. The @offset variable will be
19749 * used to indicate the starting offset of the transfer and will also return
19750 * the offset after the write object mailbox has completed. @size is used to
19751 * determine the end of the object and whether the eof bit should be set.
19752 *
19753 * Return 0 is successful and offset will contain the the new offset to use
19754 * for the next write.
19755 * Return negative value for error cases.
19756 **/
19757 int
lpfc_wr_object(struct lpfc_hba * phba,struct list_head * dmabuf_list,uint32_t size,uint32_t * offset)19758 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19759 uint32_t size, uint32_t *offset)
19760 {
19761 struct lpfc_mbx_wr_object *wr_object;
19762 LPFC_MBOXQ_t *mbox;
19763 int rc = 0, i = 0;
19764 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19765 uint32_t mbox_tmo;
19766 struct lpfc_dmabuf *dmabuf;
19767 uint32_t written = 0;
19768 bool check_change_status = false;
19769
19770 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19771 if (!mbox)
19772 return -ENOMEM;
19773
19774 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19775 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19776 sizeof(struct lpfc_mbx_wr_object) -
19777 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19778
19779 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19780 wr_object->u.request.write_offset = *offset;
19781 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19782 wr_object->u.request.object_name[0] =
19783 cpu_to_le32(wr_object->u.request.object_name[0]);
19784 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19785 list_for_each_entry(dmabuf, dmabuf_list, list) {
19786 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19787 break;
19788 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19789 wr_object->u.request.bde[i].addrHigh =
19790 putPaddrHigh(dmabuf->phys);
19791 if (written + SLI4_PAGE_SIZE >= size) {
19792 wr_object->u.request.bde[i].tus.f.bdeSize =
19793 (size - written);
19794 written += (size - written);
19795 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19796 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19797 check_change_status = true;
19798 } else {
19799 wr_object->u.request.bde[i].tus.f.bdeSize =
19800 SLI4_PAGE_SIZE;
19801 written += SLI4_PAGE_SIZE;
19802 }
19803 i++;
19804 }
19805 wr_object->u.request.bde_count = i;
19806 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19807 if (!phba->sli4_hba.intr_enable)
19808 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19809 else {
19810 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19811 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19812 }
19813 /* The IOCTL status is embedded in the mailbox subheader. */
19814 shdr_status = bf_get(lpfc_mbox_hdr_status,
19815 &wr_object->header.cfg_shdr.response);
19816 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19817 &wr_object->header.cfg_shdr.response);
19818 if (check_change_status) {
19819 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19820 &wr_object->u.response);
19821
19822 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19823 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19824 shdr_csf = bf_get(lpfc_wr_object_csf,
19825 &wr_object->u.response);
19826 if (shdr_csf)
19827 shdr_change_status =
19828 LPFC_CHANGE_STATUS_PCI_RESET;
19829 }
19830
19831 switch (shdr_change_status) {
19832 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19834 "3198 Firmware write complete: System "
19835 "reboot required to instantiate\n");
19836 break;
19837 case (LPFC_CHANGE_STATUS_FW_RESET):
19838 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19839 "3199 Firmware write complete: Firmware"
19840 " reset required to instantiate\n");
19841 break;
19842 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19843 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19844 "3200 Firmware write complete: Port "
19845 "Migration or PCI Reset required to "
19846 "instantiate\n");
19847 break;
19848 case (LPFC_CHANGE_STATUS_PCI_RESET):
19849 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19850 "3201 Firmware write complete: PCI "
19851 "Reset required to instantiate\n");
19852 break;
19853 default:
19854 break;
19855 }
19856 }
19857 if (!phba->sli4_hba.intr_enable)
19858 mempool_free(mbox, phba->mbox_mem_pool);
19859 else if (rc != MBX_TIMEOUT)
19860 mempool_free(mbox, phba->mbox_mem_pool);
19861 if (shdr_status || shdr_add_status || rc) {
19862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19863 "3025 Write Object mailbox failed with "
19864 "status x%x add_status x%x, mbx status x%x\n",
19865 shdr_status, shdr_add_status, rc);
19866 rc = -ENXIO;
19867 *offset = shdr_add_status;
19868 } else
19869 *offset += wr_object->u.response.actual_write_length;
19870 return rc;
19871 }
19872
19873 /**
19874 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19875 * @vport: pointer to vport data structure.
19876 *
19877 * This function iterate through the mailboxq and clean up all REG_LOGIN
19878 * and REG_VPI mailbox commands associated with the vport. This function
19879 * is called when driver want to restart discovery of the vport due to
19880 * a Clear Virtual Link event.
19881 **/
19882 void
lpfc_cleanup_pending_mbox(struct lpfc_vport * vport)19883 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19884 {
19885 struct lpfc_hba *phba = vport->phba;
19886 LPFC_MBOXQ_t *mb, *nextmb;
19887 struct lpfc_dmabuf *mp;
19888 struct lpfc_nodelist *ndlp;
19889 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19890 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19891 LIST_HEAD(mbox_cmd_list);
19892 uint8_t restart_loop;
19893
19894 /* Clean up internally queued mailbox commands with the vport */
19895 spin_lock_irq(&phba->hbalock);
19896 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19897 if (mb->vport != vport)
19898 continue;
19899
19900 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19901 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19902 continue;
19903
19904 list_del(&mb->list);
19905 list_add_tail(&mb->list, &mbox_cmd_list);
19906 }
19907 /* Clean up active mailbox command with the vport */
19908 mb = phba->sli.mbox_active;
19909 if (mb && (mb->vport == vport)) {
19910 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19911 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19912 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19913 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19914 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19915 /* Put reference count for delayed processing */
19916 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19917 /* Unregister the RPI when mailbox complete */
19918 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19919 }
19920 }
19921 /* Cleanup any mailbox completions which are not yet processed */
19922 do {
19923 restart_loop = 0;
19924 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19925 /*
19926 * If this mailox is already processed or it is
19927 * for another vport ignore it.
19928 */
19929 if ((mb->vport != vport) ||
19930 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19931 continue;
19932
19933 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19934 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19935 continue;
19936
19937 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19938 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19939 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19940 /* Unregister the RPI when mailbox complete */
19941 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19942 restart_loop = 1;
19943 spin_unlock_irq(&phba->hbalock);
19944 spin_lock(shost->host_lock);
19945 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19946 spin_unlock(shost->host_lock);
19947 spin_lock_irq(&phba->hbalock);
19948 break;
19949 }
19950 }
19951 } while (restart_loop);
19952
19953 spin_unlock_irq(&phba->hbalock);
19954
19955 /* Release the cleaned-up mailbox commands */
19956 while (!list_empty(&mbox_cmd_list)) {
19957 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19958 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19959 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19960 if (mp) {
19961 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19962 kfree(mp);
19963 }
19964 mb->ctx_buf = NULL;
19965 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19966 mb->ctx_ndlp = NULL;
19967 if (ndlp) {
19968 spin_lock(shost->host_lock);
19969 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19970 spin_unlock(shost->host_lock);
19971 lpfc_nlp_put(ndlp);
19972 }
19973 }
19974 mempool_free(mb, phba->mbox_mem_pool);
19975 }
19976
19977 /* Release the ndlp with the cleaned-up active mailbox command */
19978 if (act_mbx_ndlp) {
19979 spin_lock(shost->host_lock);
19980 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19981 spin_unlock(shost->host_lock);
19982 lpfc_nlp_put(act_mbx_ndlp);
19983 }
19984 }
19985
19986 /**
19987 * lpfc_drain_txq - Drain the txq
19988 * @phba: Pointer to HBA context object.
19989 *
19990 * This function attempt to submit IOCBs on the txq
19991 * to the adapter. For SLI4 adapters, the txq contains
19992 * ELS IOCBs that have been deferred because the there
19993 * are no SGLs. This congestion can occur with large
19994 * vport counts during node discovery.
19995 **/
19996
19997 uint32_t
lpfc_drain_txq(struct lpfc_hba * phba)19998 lpfc_drain_txq(struct lpfc_hba *phba)
19999 {
20000 LIST_HEAD(completions);
20001 struct lpfc_sli_ring *pring;
20002 struct lpfc_iocbq *piocbq = NULL;
20003 unsigned long iflags = 0;
20004 char *fail_msg = NULL;
20005 struct lpfc_sglq *sglq;
20006 union lpfc_wqe128 wqe;
20007 uint32_t txq_cnt = 0;
20008 struct lpfc_queue *wq;
20009
20010 if (phba->link_flag & LS_MDS_LOOPBACK) {
20011 /* MDS WQE are posted only to first WQ*/
20012 wq = phba->sli4_hba.hdwq[0].io_wq;
20013 if (unlikely(!wq))
20014 return 0;
20015 pring = wq->pring;
20016 } else {
20017 wq = phba->sli4_hba.els_wq;
20018 if (unlikely(!wq))
20019 return 0;
20020 pring = lpfc_phba_elsring(phba);
20021 }
20022
20023 if (unlikely(!pring) || list_empty(&pring->txq))
20024 return 0;
20025
20026 spin_lock_irqsave(&pring->ring_lock, iflags);
20027 list_for_each_entry(piocbq, &pring->txq, list) {
20028 txq_cnt++;
20029 }
20030
20031 if (txq_cnt > pring->txq_max)
20032 pring->txq_max = txq_cnt;
20033
20034 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20035
20036 while (!list_empty(&pring->txq)) {
20037 spin_lock_irqsave(&pring->ring_lock, iflags);
20038
20039 piocbq = lpfc_sli_ringtx_get(phba, pring);
20040 if (!piocbq) {
20041 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20042 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20043 "2823 txq empty and txq_cnt is %d\n ",
20044 txq_cnt);
20045 break;
20046 }
20047 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20048 if (!sglq) {
20049 __lpfc_sli_ringtx_put(phba, pring, piocbq);
20050 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20051 break;
20052 }
20053 txq_cnt--;
20054
20055 /* The xri and iocb resources secured,
20056 * attempt to issue request
20057 */
20058 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20059 piocbq->sli4_xritag = sglq->sli4_xritag;
20060 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20061 fail_msg = "to convert bpl to sgl";
20062 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20063 fail_msg = "to convert iocb to wqe";
20064 else if (lpfc_sli4_wq_put(wq, &wqe))
20065 fail_msg = " - Wq is full";
20066 else
20067 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20068
20069 if (fail_msg) {
20070 /* Failed means we can't issue and need to cancel */
20071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20072 "2822 IOCB failed %s iotag 0x%x "
20073 "xri 0x%x\n",
20074 fail_msg,
20075 piocbq->iotag, piocbq->sli4_xritag);
20076 list_add_tail(&piocbq->list, &completions);
20077 fail_msg = NULL;
20078 }
20079 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20080 }
20081
20082 /* Cancel all the IOCBs that cannot be issued */
20083 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20084 IOERR_SLI_ABORTED);
20085
20086 return txq_cnt;
20087 }
20088
20089 /**
20090 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
20091 * @phba: Pointer to HBA context object.
20092 * @pwqeq: Pointer to command WQE.
20093 * @sglq: Pointer to the scatter gather queue object.
20094 *
20095 * This routine converts the bpl or bde that is in the WQE
20096 * to a sgl list for the sli4 hardware. The physical address
20097 * of the bpl/bde is converted back to a virtual address.
20098 * If the WQE contains a BPL then the list of BDE's is
20099 * converted to sli4_sge's. If the WQE contains a single
20100 * BDE then it is converted to a single sli_sge.
20101 * The WQE is still in cpu endianness so the contents of
20102 * the bpl can be used without byte swapping.
20103 *
20104 * Returns valid XRI = Success, NO_XRI = Failure.
20105 */
20106 static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba * phba,struct lpfc_iocbq * pwqeq,struct lpfc_sglq * sglq)20107 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20108 struct lpfc_sglq *sglq)
20109 {
20110 uint16_t xritag = NO_XRI;
20111 struct ulp_bde64 *bpl = NULL;
20112 struct ulp_bde64 bde;
20113 struct sli4_sge *sgl = NULL;
20114 struct lpfc_dmabuf *dmabuf;
20115 union lpfc_wqe128 *wqe;
20116 int numBdes = 0;
20117 int i = 0;
20118 uint32_t offset = 0; /* accumulated offset in the sg request list */
20119 int inbound = 0; /* number of sg reply entries inbound from firmware */
20120 uint32_t cmd;
20121
20122 if (!pwqeq || !sglq)
20123 return xritag;
20124
20125 sgl = (struct sli4_sge *)sglq->sgl;
20126 wqe = &pwqeq->wqe;
20127 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20128
20129 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20130 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20131 return sglq->sli4_xritag;
20132 numBdes = pwqeq->rsvd2;
20133 if (numBdes) {
20134 /* The addrHigh and addrLow fields within the WQE
20135 * have not been byteswapped yet so there is no
20136 * need to swap them back.
20137 */
20138 if (pwqeq->context3)
20139 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20140 else
20141 return xritag;
20142
20143 bpl = (struct ulp_bde64 *)dmabuf->virt;
20144 if (!bpl)
20145 return xritag;
20146
20147 for (i = 0; i < numBdes; i++) {
20148 /* Should already be byte swapped. */
20149 sgl->addr_hi = bpl->addrHigh;
20150 sgl->addr_lo = bpl->addrLow;
20151
20152 sgl->word2 = le32_to_cpu(sgl->word2);
20153 if ((i+1) == numBdes)
20154 bf_set(lpfc_sli4_sge_last, sgl, 1);
20155 else
20156 bf_set(lpfc_sli4_sge_last, sgl, 0);
20157 /* swap the size field back to the cpu so we
20158 * can assign it to the sgl.
20159 */
20160 bde.tus.w = le32_to_cpu(bpl->tus.w);
20161 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20162 /* The offsets in the sgl need to be accumulated
20163 * separately for the request and reply lists.
20164 * The request is always first, the reply follows.
20165 */
20166 switch (cmd) {
20167 case CMD_GEN_REQUEST64_WQE:
20168 /* add up the reply sg entries */
20169 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20170 inbound++;
20171 /* first inbound? reset the offset */
20172 if (inbound == 1)
20173 offset = 0;
20174 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20175 bf_set(lpfc_sli4_sge_type, sgl,
20176 LPFC_SGE_TYPE_DATA);
20177 offset += bde.tus.f.bdeSize;
20178 break;
20179 case CMD_FCP_TRSP64_WQE:
20180 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20181 bf_set(lpfc_sli4_sge_type, sgl,
20182 LPFC_SGE_TYPE_DATA);
20183 break;
20184 case CMD_FCP_TSEND64_WQE:
20185 case CMD_FCP_TRECEIVE64_WQE:
20186 bf_set(lpfc_sli4_sge_type, sgl,
20187 bpl->tus.f.bdeFlags);
20188 if (i < 3)
20189 offset = 0;
20190 else
20191 offset += bde.tus.f.bdeSize;
20192 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20193 break;
20194 }
20195 sgl->word2 = cpu_to_le32(sgl->word2);
20196 bpl++;
20197 sgl++;
20198 }
20199 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20200 /* The addrHigh and addrLow fields of the BDE have not
20201 * been byteswapped yet so they need to be swapped
20202 * before putting them in the sgl.
20203 */
20204 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20205 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20206 sgl->word2 = le32_to_cpu(sgl->word2);
20207 bf_set(lpfc_sli4_sge_last, sgl, 1);
20208 sgl->word2 = cpu_to_le32(sgl->word2);
20209 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20210 }
20211 return sglq->sli4_xritag;
20212 }
20213
20214 /**
20215 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20216 * @phba: Pointer to HBA context object.
20217 * @qp: Pointer to HDW queue.
20218 * @pwqe: Pointer to command WQE.
20219 **/
20220 int
lpfc_sli4_issue_wqe(struct lpfc_hba * phba,struct lpfc_sli4_hdw_queue * qp,struct lpfc_iocbq * pwqe)20221 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20222 struct lpfc_iocbq *pwqe)
20223 {
20224 union lpfc_wqe128 *wqe = &pwqe->wqe;
20225 struct lpfc_async_xchg_ctx *ctxp;
20226 struct lpfc_queue *wq;
20227 struct lpfc_sglq *sglq;
20228 struct lpfc_sli_ring *pring;
20229 unsigned long iflags;
20230 uint32_t ret = 0;
20231
20232 /* NVME_LS and NVME_LS ABTS requests. */
20233 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20234 pring = phba->sli4_hba.nvmels_wq->pring;
20235 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20236 qp, wq_access);
20237 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20238 if (!sglq) {
20239 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20240 return WQE_BUSY;
20241 }
20242 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20243 pwqe->sli4_xritag = sglq->sli4_xritag;
20244 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20245 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20246 return WQE_ERROR;
20247 }
20248 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20249 pwqe->sli4_xritag);
20250 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20251 if (ret) {
20252 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20253 return ret;
20254 }
20255
20256 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20257 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20258
20259 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20260 return 0;
20261 }
20262
20263 /* NVME_FCREQ and NVME_ABTS requests */
20264 if (pwqe->iocb_flag & LPFC_IO_NVME) {
20265 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20266 wq = qp->io_wq;
20267 pring = wq->pring;
20268
20269 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20270
20271 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20272 qp, wq_access);
20273 ret = lpfc_sli4_wq_put(wq, wqe);
20274 if (ret) {
20275 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20276 return ret;
20277 }
20278 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20279 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20280
20281 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20282 return 0;
20283 }
20284
20285 /* NVMET requests */
20286 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20287 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
20288 wq = qp->io_wq;
20289 pring = wq->pring;
20290
20291 ctxp = pwqe->context2;
20292 sglq = ctxp->ctxbuf->sglq;
20293 if (pwqe->sli4_xritag == NO_XRI) {
20294 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20295 pwqe->sli4_xritag = sglq->sli4_xritag;
20296 }
20297 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20298 pwqe->sli4_xritag);
20299 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20300
20301 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20302 qp, wq_access);
20303 ret = lpfc_sli4_wq_put(wq, wqe);
20304 if (ret) {
20305 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20306 return ret;
20307 }
20308 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20309 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20310
20311 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20312 return 0;
20313 }
20314 return WQE_ERROR;
20315 }
20316
20317 #ifdef LPFC_MXP_STAT
20318 /**
20319 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
20320 * @phba: pointer to lpfc hba data structure.
20321 * @hwqid: belong to which HWQ.
20322 *
20323 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
20324 * 15 seconds after a test case is running.
20325 *
20326 * The user should call lpfc_debugfs_multixripools_write before running a test
20327 * case to clear stat_snapshot_taken. Then the user starts a test case. During
20328 * test case is running, stat_snapshot_taken is incremented by 1 every time when
20329 * this routine is called from heartbeat timer. When stat_snapshot_taken is
20330 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
20331 **/
lpfc_snapshot_mxp(struct lpfc_hba * phba,u32 hwqid)20332 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20333 {
20334 struct lpfc_sli4_hdw_queue *qp;
20335 struct lpfc_multixri_pool *multixri_pool;
20336 struct lpfc_pvt_pool *pvt_pool;
20337 struct lpfc_pbl_pool *pbl_pool;
20338 u32 txcmplq_cnt;
20339
20340 qp = &phba->sli4_hba.hdwq[hwqid];
20341 multixri_pool = qp->p_multixri_pool;
20342 if (!multixri_pool)
20343 return;
20344
20345 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20346 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20347 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20348 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20349
20350 multixri_pool->stat_pbl_count = pbl_pool->count;
20351 multixri_pool->stat_pvt_count = pvt_pool->count;
20352 multixri_pool->stat_busy_count = txcmplq_cnt;
20353 }
20354
20355 multixri_pool->stat_snapshot_taken++;
20356 }
20357 #endif
20358
20359 /**
20360 * lpfc_adjust_pvt_pool_count - Adjust private pool count
20361 * @phba: pointer to lpfc hba data structure.
20362 * @hwqid: belong to which HWQ.
20363 *
20364 * This routine moves some XRIs from private to public pool when private pool
20365 * is not busy.
20366 **/
lpfc_adjust_pvt_pool_count(struct lpfc_hba * phba,u32 hwqid)20367 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20368 {
20369 struct lpfc_multixri_pool *multixri_pool;
20370 u32 io_req_count;
20371 u32 prev_io_req_count;
20372
20373 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20374 if (!multixri_pool)
20375 return;
20376 io_req_count = multixri_pool->io_req_count;
20377 prev_io_req_count = multixri_pool->prev_io_req_count;
20378
20379 if (prev_io_req_count != io_req_count) {
20380 /* Private pool is busy */
20381 multixri_pool->prev_io_req_count = io_req_count;
20382 } else {
20383 /* Private pool is not busy.
20384 * Move XRIs from private to public pool.
20385 */
20386 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20387 }
20388 }
20389
20390 /**
20391 * lpfc_adjust_high_watermark - Adjust high watermark
20392 * @phba: pointer to lpfc hba data structure.
20393 * @hwqid: belong to which HWQ.
20394 *
20395 * This routine sets high watermark as number of outstanding XRIs,
20396 * but make sure the new value is between xri_limit/2 and xri_limit.
20397 **/
lpfc_adjust_high_watermark(struct lpfc_hba * phba,u32 hwqid)20398 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20399 {
20400 u32 new_watermark;
20401 u32 watermark_max;
20402 u32 watermark_min;
20403 u32 xri_limit;
20404 u32 txcmplq_cnt;
20405 u32 abts_io_bufs;
20406 struct lpfc_multixri_pool *multixri_pool;
20407 struct lpfc_sli4_hdw_queue *qp;
20408
20409 qp = &phba->sli4_hba.hdwq[hwqid];
20410 multixri_pool = qp->p_multixri_pool;
20411 if (!multixri_pool)
20412 return;
20413 xri_limit = multixri_pool->xri_limit;
20414
20415 watermark_max = xri_limit;
20416 watermark_min = xri_limit / 2;
20417
20418 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20419 abts_io_bufs = qp->abts_scsi_io_bufs;
20420 abts_io_bufs += qp->abts_nvme_io_bufs;
20421
20422 new_watermark = txcmplq_cnt + abts_io_bufs;
20423 new_watermark = min(watermark_max, new_watermark);
20424 new_watermark = max(watermark_min, new_watermark);
20425 multixri_pool->pvt_pool.high_watermark = new_watermark;
20426
20427 #ifdef LPFC_MXP_STAT
20428 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20429 new_watermark);
20430 #endif
20431 }
20432
20433 /**
20434 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
20435 * @phba: pointer to lpfc hba data structure.
20436 * @hwqid: belong to which HWQ.
20437 *
20438 * This routine is called from hearbeat timer when pvt_pool is idle.
20439 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
20440 * The first step moves (all - low_watermark) amount of XRIs.
20441 * The second step moves the rest of XRIs.
20442 **/
lpfc_move_xri_pvt_to_pbl(struct lpfc_hba * phba,u32 hwqid)20443 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20444 {
20445 struct lpfc_pbl_pool *pbl_pool;
20446 struct lpfc_pvt_pool *pvt_pool;
20447 struct lpfc_sli4_hdw_queue *qp;
20448 struct lpfc_io_buf *lpfc_ncmd;
20449 struct lpfc_io_buf *lpfc_ncmd_next;
20450 unsigned long iflag;
20451 struct list_head tmp_list;
20452 u32 tmp_count;
20453
20454 qp = &phba->sli4_hba.hdwq[hwqid];
20455 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20456 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20457 tmp_count = 0;
20458
20459 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20460 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20461
20462 if (pvt_pool->count > pvt_pool->low_watermark) {
20463 /* Step 1: move (all - low_watermark) from pvt_pool
20464 * to pbl_pool
20465 */
20466
20467 /* Move low watermark of bufs from pvt_pool to tmp_list */
20468 INIT_LIST_HEAD(&tmp_list);
20469 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20470 &pvt_pool->list, list) {
20471 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20472 tmp_count++;
20473 if (tmp_count >= pvt_pool->low_watermark)
20474 break;
20475 }
20476
20477 /* Move all bufs from pvt_pool to pbl_pool */
20478 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20479
20480 /* Move all bufs from tmp_list to pvt_pool */
20481 list_splice(&tmp_list, &pvt_pool->list);
20482
20483 pbl_pool->count += (pvt_pool->count - tmp_count);
20484 pvt_pool->count = tmp_count;
20485 } else {
20486 /* Step 2: move the rest from pvt_pool to pbl_pool */
20487 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20488 pbl_pool->count += pvt_pool->count;
20489 pvt_pool->count = 0;
20490 }
20491
20492 spin_unlock(&pvt_pool->lock);
20493 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20494 }
20495
20496 /**
20497 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20498 * @phba: pointer to lpfc hba data structure
20499 * @qp: pointer to HDW queue
20500 * @pbl_pool: specified public free XRI pool
20501 * @pvt_pool: specified private free XRI pool
20502 * @count: number of XRIs to move
20503 *
20504 * This routine tries to move some free common bufs from the specified pbl_pool
20505 * to the specified pvt_pool. It might move less than count XRIs if there's not
20506 * enough in public pool.
20507 *
20508 * Return:
20509 * true - if XRIs are successfully moved from the specified pbl_pool to the
20510 * specified pvt_pool
20511 * false - if the specified pbl_pool is empty or locked by someone else
20512 **/
20513 static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba * phba,struct lpfc_sli4_hdw_queue * qp,struct lpfc_pbl_pool * pbl_pool,struct lpfc_pvt_pool * pvt_pool,u32 count)20514 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20515 struct lpfc_pbl_pool *pbl_pool,
20516 struct lpfc_pvt_pool *pvt_pool, u32 count)
20517 {
20518 struct lpfc_io_buf *lpfc_ncmd;
20519 struct lpfc_io_buf *lpfc_ncmd_next;
20520 unsigned long iflag;
20521 int ret;
20522
20523 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20524 if (ret) {
20525 if (pbl_pool->count) {
20526 /* Move a batch of XRIs from public to private pool */
20527 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20528 list_for_each_entry_safe(lpfc_ncmd,
20529 lpfc_ncmd_next,
20530 &pbl_pool->list,
20531 list) {
20532 list_move_tail(&lpfc_ncmd->list,
20533 &pvt_pool->list);
20534 pvt_pool->count++;
20535 pbl_pool->count--;
20536 count--;
20537 if (count == 0)
20538 break;
20539 }
20540
20541 spin_unlock(&pvt_pool->lock);
20542 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20543 return true;
20544 }
20545 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20546 }
20547
20548 return false;
20549 }
20550
20551 /**
20552 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
20553 * @phba: pointer to lpfc hba data structure.
20554 * @hwqid: belong to which HWQ.
20555 * @count: number of XRIs to move
20556 *
20557 * This routine tries to find some free common bufs in one of public pools with
20558 * Round Robin method. The search always starts from local hwqid, then the next
20559 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
20560 * a batch of free common bufs are moved to private pool on hwqid.
20561 * It might move less than count XRIs if there's not enough in public pool.
20562 **/
lpfc_move_xri_pbl_to_pvt(struct lpfc_hba * phba,u32 hwqid,u32 count)20563 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20564 {
20565 struct lpfc_multixri_pool *multixri_pool;
20566 struct lpfc_multixri_pool *next_multixri_pool;
20567 struct lpfc_pvt_pool *pvt_pool;
20568 struct lpfc_pbl_pool *pbl_pool;
20569 struct lpfc_sli4_hdw_queue *qp;
20570 u32 next_hwqid;
20571 u32 hwq_count;
20572 int ret;
20573
20574 qp = &phba->sli4_hba.hdwq[hwqid];
20575 multixri_pool = qp->p_multixri_pool;
20576 pvt_pool = &multixri_pool->pvt_pool;
20577 pbl_pool = &multixri_pool->pbl_pool;
20578
20579 /* Check if local pbl_pool is available */
20580 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20581 if (ret) {
20582 #ifdef LPFC_MXP_STAT
20583 multixri_pool->local_pbl_hit_count++;
20584 #endif
20585 return;
20586 }
20587
20588 hwq_count = phba->cfg_hdw_queue;
20589
20590 /* Get the next hwqid which was found last time */
20591 next_hwqid = multixri_pool->rrb_next_hwqid;
20592
20593 do {
20594 /* Go to next hwq */
20595 next_hwqid = (next_hwqid + 1) % hwq_count;
20596
20597 next_multixri_pool =
20598 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20599 pbl_pool = &next_multixri_pool->pbl_pool;
20600
20601 /* Check if the public free xri pool is available */
20602 ret = _lpfc_move_xri_pbl_to_pvt(
20603 phba, qp, pbl_pool, pvt_pool, count);
20604
20605 /* Exit while-loop if success or all hwqid are checked */
20606 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20607
20608 /* Starting point for the next time */
20609 multixri_pool->rrb_next_hwqid = next_hwqid;
20610
20611 if (!ret) {
20612 /* stats: all public pools are empty*/
20613 multixri_pool->pbl_empty_count++;
20614 }
20615
20616 #ifdef LPFC_MXP_STAT
20617 if (ret) {
20618 if (next_hwqid == hwqid)
20619 multixri_pool->local_pbl_hit_count++;
20620 else
20621 multixri_pool->other_pbl_hit_count++;
20622 }
20623 #endif
20624 }
20625
20626 /**
20627 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
20628 * @phba: pointer to lpfc hba data structure.
20629 * @hwqid: belong to which HWQ.
20630 *
20631 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than
20632 * low watermark.
20633 **/
lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba * phba,u32 hwqid)20634 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20635 {
20636 struct lpfc_multixri_pool *multixri_pool;
20637 struct lpfc_pvt_pool *pvt_pool;
20638
20639 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20640 pvt_pool = &multixri_pool->pvt_pool;
20641
20642 if (pvt_pool->count < pvt_pool->low_watermark)
20643 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20644 }
20645
20646 /**
20647 * lpfc_release_io_buf - Return one IO buf back to free pool
20648 * @phba: pointer to lpfc hba data structure.
20649 * @lpfc_ncmd: IO buf to be returned.
20650 * @qp: belong to which HWQ.
20651 *
20652 * This routine returns one IO buf back to free pool. If this is an urgent IO,
20653 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
20654 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
20655 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
20656 * lpfc_io_buf_list_put.
20657 **/
lpfc_release_io_buf(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_ncmd,struct lpfc_sli4_hdw_queue * qp)20658 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20659 struct lpfc_sli4_hdw_queue *qp)
20660 {
20661 unsigned long iflag;
20662 struct lpfc_pbl_pool *pbl_pool;
20663 struct lpfc_pvt_pool *pvt_pool;
20664 struct lpfc_epd_pool *epd_pool;
20665 u32 txcmplq_cnt;
20666 u32 xri_owned;
20667 u32 xri_limit;
20668 u32 abts_io_bufs;
20669
20670 /* MUST zero fields if buffer is reused by another protocol */
20671 lpfc_ncmd->nvmeCmd = NULL;
20672 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20673 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20674
20675 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20676 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20677 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20678
20679 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20680 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20681
20682 if (phba->cfg_xri_rebalancing) {
20683 if (lpfc_ncmd->expedite) {
20684 /* Return to expedite pool */
20685 epd_pool = &phba->epd_pool;
20686 spin_lock_irqsave(&epd_pool->lock, iflag);
20687 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20688 epd_pool->count++;
20689 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20690 return;
20691 }
20692
20693 /* Avoid invalid access if an IO sneaks in and is being rejected
20694 * just _after_ xri pools are destroyed in lpfc_offline.
20695 * Nothing much can be done at this point.
20696 */
20697 if (!qp->p_multixri_pool)
20698 return;
20699
20700 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20701 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20702
20703 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20704 abts_io_bufs = qp->abts_scsi_io_bufs;
20705 abts_io_bufs += qp->abts_nvme_io_bufs;
20706
20707 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20708 xri_limit = qp->p_multixri_pool->xri_limit;
20709
20710 #ifdef LPFC_MXP_STAT
20711 if (xri_owned <= xri_limit)
20712 qp->p_multixri_pool->below_limit_count++;
20713 else
20714 qp->p_multixri_pool->above_limit_count++;
20715 #endif
20716
20717 /* XRI goes to either public or private free xri pool
20718 * based on watermark and xri_limit
20719 */
20720 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20721 (xri_owned < xri_limit &&
20722 pvt_pool->count < pvt_pool->high_watermark)) {
20723 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20724 qp, free_pvt_pool);
20725 list_add_tail(&lpfc_ncmd->list,
20726 &pvt_pool->list);
20727 pvt_pool->count++;
20728 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20729 } else {
20730 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20731 qp, free_pub_pool);
20732 list_add_tail(&lpfc_ncmd->list,
20733 &pbl_pool->list);
20734 pbl_pool->count++;
20735 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20736 }
20737 } else {
20738 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20739 qp, free_xri);
20740 list_add_tail(&lpfc_ncmd->list,
20741 &qp->lpfc_io_buf_list_put);
20742 qp->put_io_bufs++;
20743 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20744 iflag);
20745 }
20746 }
20747
20748 /**
20749 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
20750 * @phba: pointer to lpfc hba data structure.
20751 * @qp: pointer to HDW queue
20752 * @pvt_pool: pointer to private pool data structure.
20753 * @ndlp: pointer to lpfc nodelist data structure.
20754 *
20755 * This routine tries to get one free IO buf from private pool.
20756 *
20757 * Return:
20758 * pointer to one free IO buf - if private pool is not empty
20759 * NULL - if private pool is empty
20760 **/
20761 static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba * phba,struct lpfc_sli4_hdw_queue * qp,struct lpfc_pvt_pool * pvt_pool,struct lpfc_nodelist * ndlp)20762 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20763 struct lpfc_sli4_hdw_queue *qp,
20764 struct lpfc_pvt_pool *pvt_pool,
20765 struct lpfc_nodelist *ndlp)
20766 {
20767 struct lpfc_io_buf *lpfc_ncmd;
20768 struct lpfc_io_buf *lpfc_ncmd_next;
20769 unsigned long iflag;
20770
20771 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20772 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20773 &pvt_pool->list, list) {
20774 if (lpfc_test_rrq_active(
20775 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20776 continue;
20777 list_del(&lpfc_ncmd->list);
20778 pvt_pool->count--;
20779 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20780 return lpfc_ncmd;
20781 }
20782 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20783
20784 return NULL;
20785 }
20786
20787 /**
20788 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20789 * @phba: pointer to lpfc hba data structure.
20790 *
20791 * This routine tries to get one free IO buf from expedite pool.
20792 *
20793 * Return:
20794 * pointer to one free IO buf - if expedite pool is not empty
20795 * NULL - if expedite pool is empty
20796 **/
20797 static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba * phba)20798 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20799 {
20800 struct lpfc_io_buf *lpfc_ncmd;
20801 struct lpfc_io_buf *lpfc_ncmd_next;
20802 unsigned long iflag;
20803 struct lpfc_epd_pool *epd_pool;
20804
20805 epd_pool = &phba->epd_pool;
20806 lpfc_ncmd = NULL;
20807
20808 spin_lock_irqsave(&epd_pool->lock, iflag);
20809 if (epd_pool->count > 0) {
20810 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20811 &epd_pool->list, list) {
20812 list_del(&lpfc_ncmd->list);
20813 epd_pool->count--;
20814 break;
20815 }
20816 }
20817 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20818
20819 return lpfc_ncmd;
20820 }
20821
20822 /**
20823 * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs
20824 * @phba: pointer to lpfc hba data structure.
20825 * @ndlp: pointer to lpfc nodelist data structure.
20826 * @hwqid: belong to which HWQ
20827 * @expedite: 1 means this request is urgent.
20828 *
20829 * This routine will do the following actions and then return a pointer to
20830 * one free IO buf.
20831 *
20832 * 1. If private free xri count is empty, move some XRIs from public to
20833 * private pool.
20834 * 2. Get one XRI from private free xri pool.
20835 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20836 * get one free xri from expedite pool.
20837 *
20838 * Note: ndlp is only used on SCSI side for RRQ testing.
20839 * The caller should pass NULL for ndlp on NVME side.
20840 *
20841 * Return:
20842 * pointer to one free IO buf - if private pool is not empty
20843 * NULL - if private pool is empty
20844 **/
20845 static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,int hwqid,int expedite)20846 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20847 struct lpfc_nodelist *ndlp,
20848 int hwqid, int expedite)
20849 {
20850 struct lpfc_sli4_hdw_queue *qp;
20851 struct lpfc_multixri_pool *multixri_pool;
20852 struct lpfc_pvt_pool *pvt_pool;
20853 struct lpfc_io_buf *lpfc_ncmd;
20854
20855 qp = &phba->sli4_hba.hdwq[hwqid];
20856 lpfc_ncmd = NULL;
20857 multixri_pool = qp->p_multixri_pool;
20858 pvt_pool = &multixri_pool->pvt_pool;
20859 multixri_pool->io_req_count++;
20860
20861 /* If pvt_pool is empty, move some XRIs from public to private pool */
20862 if (pvt_pool->count == 0)
20863 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20864
20865 /* Get one XRI from private free xri pool */
20866 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20867
20868 if (lpfc_ncmd) {
20869 lpfc_ncmd->hdwq = qp;
20870 lpfc_ncmd->hdwq_no = hwqid;
20871 } else if (expedite) {
20872 /* If we fail to get one from pvt_pool and this is an expedite
20873 * request, get one free xri from expedite pool.
20874 */
20875 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20876 }
20877
20878 return lpfc_ncmd;
20879 }
20880
20881 static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,int idx)20882 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20883 {
20884 struct lpfc_sli4_hdw_queue *qp;
20885 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20886
20887 qp = &phba->sli4_hba.hdwq[idx];
20888 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20889 &qp->lpfc_io_buf_list_get, list) {
20890 if (lpfc_test_rrq_active(phba, ndlp,
20891 lpfc_cmd->cur_iocbq.sli4_lxritag))
20892 continue;
20893
20894 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20895 continue;
20896
20897 list_del_init(&lpfc_cmd->list);
20898 qp->get_io_bufs--;
20899 lpfc_cmd->hdwq = qp;
20900 lpfc_cmd->hdwq_no = idx;
20901 return lpfc_cmd;
20902 }
20903 return NULL;
20904 }
20905
20906 /**
20907 * lpfc_get_io_buf - Get one IO buffer from free pool
20908 * @phba: The HBA for which this call is being executed.
20909 * @ndlp: pointer to lpfc nodelist data structure.
20910 * @hwqid: belong to which HWQ
20911 * @expedite: 1 means this request is urgent.
20912 *
20913 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
20914 * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
20915 * a IO buffer from head of @hdwq io_buf_list and returns to caller.
20916 *
20917 * Note: ndlp is only used on SCSI side for RRQ testing.
20918 * The caller should pass NULL for ndlp on NVME side.
20919 *
20920 * Return codes:
20921 * NULL - Error
20922 * Pointer to lpfc_io_buf - Success
20923 **/
lpfc_get_io_buf(struct lpfc_hba * phba,struct lpfc_nodelist * ndlp,u32 hwqid,int expedite)20924 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20925 struct lpfc_nodelist *ndlp,
20926 u32 hwqid, int expedite)
20927 {
20928 struct lpfc_sli4_hdw_queue *qp;
20929 unsigned long iflag;
20930 struct lpfc_io_buf *lpfc_cmd;
20931
20932 qp = &phba->sli4_hba.hdwq[hwqid];
20933 lpfc_cmd = NULL;
20934
20935 if (phba->cfg_xri_rebalancing)
20936 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20937 phba, ndlp, hwqid, expedite);
20938 else {
20939 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20940 qp, alloc_xri_get);
20941 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20942 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20943 if (!lpfc_cmd) {
20944 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20945 qp, alloc_xri_put);
20946 list_splice(&qp->lpfc_io_buf_list_put,
20947 &qp->lpfc_io_buf_list_get);
20948 qp->get_io_bufs += qp->put_io_bufs;
20949 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20950 qp->put_io_bufs = 0;
20951 spin_unlock(&qp->io_buf_list_put_lock);
20952 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20953 expedite)
20954 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20955 }
20956 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20957 }
20958
20959 return lpfc_cmd;
20960 }
20961
20962 /**
20963 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
20964 * @phba: The HBA for which this call is being executed.
20965 * @lpfc_buf: IO buf structure to append the SGL chunk
20966 *
20967 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
20968 * and will allocate an SGL chunk if the pool is empty.
20969 *
20970 * Return codes:
20971 * NULL - Error
20972 * Pointer to sli4_hybrid_sgl - Success
20973 **/
20974 struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_buf)20975 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20976 {
20977 struct sli4_hybrid_sgl *list_entry = NULL;
20978 struct sli4_hybrid_sgl *tmp = NULL;
20979 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20980 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20981 struct list_head *buf_list = &hdwq->sgl_list;
20982 unsigned long iflags;
20983
20984 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20985
20986 if (likely(!list_empty(buf_list))) {
20987 /* break off 1 chunk from the sgl_list */
20988 list_for_each_entry_safe(list_entry, tmp,
20989 buf_list, list_node) {
20990 list_move_tail(&list_entry->list_node,
20991 &lpfc_buf->dma_sgl_xtra_list);
20992 break;
20993 }
20994 } else {
20995 /* allocate more */
20996 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20997 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20998 cpu_to_node(hdwq->io_wq->chann));
20999 if (!tmp) {
21000 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21001 "8353 error kmalloc memory for HDWQ "
21002 "%d %s\n",
21003 lpfc_buf->hdwq_no, __func__);
21004 return NULL;
21005 }
21006
21007 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21008 GFP_ATOMIC, &tmp->dma_phys_sgl);
21009 if (!tmp->dma_sgl) {
21010 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21011 "8354 error pool_alloc memory for HDWQ "
21012 "%d %s\n",
21013 lpfc_buf->hdwq_no, __func__);
21014 kfree(tmp);
21015 return NULL;
21016 }
21017
21018 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21019 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21020 }
21021
21022 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21023 struct sli4_hybrid_sgl,
21024 list_node);
21025
21026 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21027
21028 return allocated_sgl;
21029 }
21030
21031 /**
21032 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
21033 * @phba: The HBA for which this call is being executed.
21034 * @lpfc_buf: IO buf structure with the SGL chunk
21035 *
21036 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
21037 *
21038 * Return codes:
21039 * 0 - Success
21040 * -EINVAL - Error
21041 **/
21042 int
lpfc_put_sgl_per_hdwq(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_buf)21043 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21044 {
21045 int rc = 0;
21046 struct sli4_hybrid_sgl *list_entry = NULL;
21047 struct sli4_hybrid_sgl *tmp = NULL;
21048 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21049 struct list_head *buf_list = &hdwq->sgl_list;
21050 unsigned long iflags;
21051
21052 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21053
21054 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21055 list_for_each_entry_safe(list_entry, tmp,
21056 &lpfc_buf->dma_sgl_xtra_list,
21057 list_node) {
21058 list_move_tail(&list_entry->list_node,
21059 buf_list);
21060 }
21061 } else {
21062 rc = -EINVAL;
21063 }
21064
21065 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21066 return rc;
21067 }
21068
21069 /**
21070 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
21071 * @phba: phba object
21072 * @hdwq: hdwq to cleanup sgl buff resources on
21073 *
21074 * This routine frees all SGL chunks of hdwq SGL chunk pool.
21075 *
21076 * Return codes:
21077 * None
21078 **/
21079 void
lpfc_free_sgl_per_hdwq(struct lpfc_hba * phba,struct lpfc_sli4_hdw_queue * hdwq)21080 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21081 struct lpfc_sli4_hdw_queue *hdwq)
21082 {
21083 struct list_head *buf_list = &hdwq->sgl_list;
21084 struct sli4_hybrid_sgl *list_entry = NULL;
21085 struct sli4_hybrid_sgl *tmp = NULL;
21086 unsigned long iflags;
21087
21088 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21089
21090 /* Free sgl pool */
21091 list_for_each_entry_safe(list_entry, tmp,
21092 buf_list, list_node) {
21093 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21094 list_entry->dma_sgl,
21095 list_entry->dma_phys_sgl);
21096 list_del(&list_entry->list_node);
21097 kfree(list_entry);
21098 }
21099
21100 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21101 }
21102
21103 /**
21104 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
21105 * @phba: The HBA for which this call is being executed.
21106 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
21107 *
21108 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
21109 * and will allocate an CMD/RSP buffer if the pool is empty.
21110 *
21111 * Return codes:
21112 * NULL - Error
21113 * Pointer to fcp_cmd_rsp_buf - Success
21114 **/
21115 struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_buf)21116 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21117 struct lpfc_io_buf *lpfc_buf)
21118 {
21119 struct fcp_cmd_rsp_buf *list_entry = NULL;
21120 struct fcp_cmd_rsp_buf *tmp = NULL;
21121 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
21122 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21123 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21124 unsigned long iflags;
21125
21126 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21127
21128 if (likely(!list_empty(buf_list))) {
21129 /* break off 1 chunk from the list */
21130 list_for_each_entry_safe(list_entry, tmp,
21131 buf_list,
21132 list_node) {
21133 list_move_tail(&list_entry->list_node,
21134 &lpfc_buf->dma_cmd_rsp_list);
21135 break;
21136 }
21137 } else {
21138 /* allocate more */
21139 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21140 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21141 cpu_to_node(hdwq->io_wq->chann));
21142 if (!tmp) {
21143 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21144 "8355 error kmalloc memory for HDWQ "
21145 "%d %s\n",
21146 lpfc_buf->hdwq_no, __func__);
21147 return NULL;
21148 }
21149
21150 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
21151 GFP_ATOMIC,
21152 &tmp->fcp_cmd_rsp_dma_handle);
21153
21154 if (!tmp->fcp_cmnd) {
21155 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21156 "8356 error pool_alloc memory for HDWQ "
21157 "%d %s\n",
21158 lpfc_buf->hdwq_no, __func__);
21159 kfree(tmp);
21160 return NULL;
21161 }
21162
21163 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
21164 sizeof(struct fcp_cmnd));
21165
21166 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21167 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
21168 }
21169
21170 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
21171 struct fcp_cmd_rsp_buf,
21172 list_node);
21173
21174 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21175
21176 return allocated_buf;
21177 }
21178
21179 /**
21180 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
21181 * @phba: The HBA for which this call is being executed.
21182 * @lpfc_buf: IO buf structure with the CMD/RSP buf
21183 *
21184 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
21185 *
21186 * Return codes:
21187 * 0 - Success
21188 * -EINVAL - Error
21189 **/
21190 int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba * phba,struct lpfc_io_buf * lpfc_buf)21191 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21192 struct lpfc_io_buf *lpfc_buf)
21193 {
21194 int rc = 0;
21195 struct fcp_cmd_rsp_buf *list_entry = NULL;
21196 struct fcp_cmd_rsp_buf *tmp = NULL;
21197 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21198 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21199 unsigned long iflags;
21200
21201 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21202
21203 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21204 list_for_each_entry_safe(list_entry, tmp,
21205 &lpfc_buf->dma_cmd_rsp_list,
21206 list_node) {
21207 list_move_tail(&list_entry->list_node,
21208 buf_list);
21209 }
21210 } else {
21211 rc = -EINVAL;
21212 }
21213
21214 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21215 return rc;
21216 }
21217
21218 /**
21219 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
21220 * @phba: phba object
21221 * @hdwq: hdwq to cleanup cmd rsp buff resources on
21222 *
21223 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
21224 *
21225 * Return codes:
21226 * None
21227 **/
21228 void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba * phba,struct lpfc_sli4_hdw_queue * hdwq)21229 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21230 struct lpfc_sli4_hdw_queue *hdwq)
21231 {
21232 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21233 struct fcp_cmd_rsp_buf *list_entry = NULL;
21234 struct fcp_cmd_rsp_buf *tmp = NULL;
21235 unsigned long iflags;
21236
21237 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21238
21239 /* Free cmd_rsp buf pool */
21240 list_for_each_entry_safe(list_entry, tmp,
21241 buf_list,
21242 list_node) {
21243 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21244 list_entry->fcp_cmnd,
21245 list_entry->fcp_cmd_rsp_dma_handle);
21246 list_del(&list_entry->list_node);
21247 kfree(list_entry);
21248 }
21249
21250 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21251 }
21252