1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/slab.h>
27
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_cmnd.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/fc/fc_fs.h>
34 #include <linux/aer.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_compat.h"
47 #include "lpfc_debugfs.h"
48 #include "lpfc_vport.h"
49
50 /* There are only four IOCB completion types. */
51 typedef enum _lpfc_iocb_type {
52 LPFC_UNKNOWN_IOCB,
53 LPFC_UNSOL_IOCB,
54 LPFC_SOL_IOCB,
55 LPFC_ABORT_IOCB
56 } lpfc_iocb_type;
57
58
59 /* Provide function prototypes local to this module. */
60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint32_t);
62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
63 uint8_t *, uint32_t *);
64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
65 struct lpfc_iocbq *);
66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
67 struct hbq_dmabuf *);
68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
69 struct lpfc_cqe *);
70 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
71 int);
72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
73 uint32_t);
74
75 static IOCB_t *
76 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
77 {
78 return &iocbq->iocb;
79 }
80
81 /**
82 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
83 * @q: The Work Queue to operate on.
84 * @wqe: The work Queue Entry to put on the Work queue.
85 *
86 * This routine will copy the contents of @wqe to the next available entry on
87 * the @q. This function will then ring the Work Queue Doorbell to signal the
88 * HBA to start processing the Work Queue Entry. This function returns 0 if
89 * successful. If no entries are available on @q then this function will return
90 * -ENOMEM.
91 * The caller is expected to hold the hbalock when calling this routine.
92 **/
93 static uint32_t
94 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
95 {
96 union lpfc_wqe *temp_wqe;
97 struct lpfc_register doorbell;
98 uint32_t host_index;
99 uint32_t idx;
100
101 /* sanity check on queue memory */
102 if (unlikely(!q))
103 return -ENOMEM;
104 temp_wqe = q->qe[q->host_index].wqe;
105
106 /* If the host has not yet processed the next entry then we are done */
107 idx = ((q->host_index + 1) % q->entry_count);
108 if (idx == q->hba_index) {
109 q->WQ_overflow++;
110 return -ENOMEM;
111 }
112 q->WQ_posted++;
113 /* set consumption flag every once in a while */
114 if (!((q->host_index + 1) % q->entry_repost))
115 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
116 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
117 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
118 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
119
120 /* Update the host index before invoking device */
121 host_index = q->host_index;
122
123 q->host_index = idx;
124
125 /* Ring Doorbell */
126 doorbell.word0 = 0;
127 if (q->db_format == LPFC_DB_LIST_FORMAT) {
128 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
129 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
130 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
131 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
132 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
133 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
134 } else {
135 return -EINVAL;
136 }
137 writel(doorbell.word0, q->db_regaddr);
138
139 return 0;
140 }
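
/*
 * Illustrative sketch, not part of the driver source: a caller posting a
 * work queue entry is expected to serialize on the hbalock, roughly as
 * below.  The wq pointer (for example phba->sli4_hba.els_wq) and the
 * prepared wqe are assumptions for the example only.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		return IOCB_ERROR;	(hypothetical error handling)
 */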
141
142 /**
143 * lpfc_sli4_wq_release - Updates internal hba index for WQ
144 * @q: The Work Queue to operate on.
145 * @index: The index to advance the hba index to.
146 *
147 * This routine will update the HBA index of a queue to reflect consumption of
148 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
149 * an entry the host calls this function to update the queue's internal
150 * pointers. This routine returns the number of entries that were consumed by
151 * the HBA.
152 **/
153 static uint32_t
154 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
155 {
156 uint32_t released = 0;
157
158 /* sanity check on queue memory */
159 if (unlikely(!q))
160 return 0;
161
162 if (q->hba_index == index)
163 return 0;
164 do {
165 q->hba_index = ((q->hba_index + 1) % q->entry_count);
166 released++;
167 } while (q->hba_index != index);
168 return released;
169 }
170
171 /**
172 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
173 * @q: The Mailbox Queue to operate on.
174 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
175 *
176 * This routine will copy the contents of @mqe to the next available entry on
177 * the @q. This function will then ring the Mailbox Queue Doorbell to signal the
178 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
179 * successful. If no entries are available on @q then this function will return
180 * -ENOMEM.
181 * The caller is expected to hold the hbalock when calling this routine.
182 **/
183 static uint32_t
184 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
185 {
186 struct lpfc_mqe *temp_mqe;
187 struct lpfc_register doorbell;
188 uint32_t host_index;
189
190 /* sanity check on queue memory */
191 if (unlikely(!q))
192 return -ENOMEM;
193 temp_mqe = q->qe[q->host_index].mqe;
194
195 /* If the host has not yet processed the next entry then we are done */
196 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
197 return -ENOMEM;
198 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
199 /* Save off the mailbox pointer for completion */
200 q->phba->mbox = (MAILBOX_t *)temp_mqe;
201
202 /* Update the host index before invoking device */
203 host_index = q->host_index;
204 q->host_index = ((q->host_index + 1) % q->entry_count);
205
206 /* Ring Doorbell */
207 doorbell.word0 = 0;
208 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
209 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
210 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
211 return 0;
212 }
213
214 /**
215 * lpfc_sli4_mq_release - Updates internal hba index for MQ
216 * @q: The Mailbox Queue to operate on.
217 *
218 * This routine will update the HBA index of a queue to reflect consumption of
219 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
220 * an entry the host calls this function to update the queue's internal
221 * pointers. This routine returns the number of entries that were consumed by
222 * the HBA.
223 **/
224 static uint32_t
225 lpfc_sli4_mq_release(struct lpfc_queue *q)
226 {
227 /* sanity check on queue memory */
228 if (unlikely(!q))
229 return 0;
230
231 /* Clear the mailbox pointer for completion */
232 q->phba->mbox = NULL;
233 q->hba_index = ((q->hba_index + 1) % q->entry_count);
234 return 1;
235 }
236
237 /**
238 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
239 * @q: The Event Queue to get the first valid EQE from
240 *
241 * This routine will get the first valid Event Queue Entry from @q, update
242 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
243 * the Queue (no more work to do), or the Queue is full of EQEs that have been
244 * processed, but not popped back to the HBA then this routine will return NULL.
245 **/
246 static struct lpfc_eqe *
247 lpfc_sli4_eq_get(struct lpfc_queue *q)
248 {
249 struct lpfc_eqe *eqe;
250 uint32_t idx;
251
252 /* sanity check on queue memory */
253 if (unlikely(!q))
254 return NULL;
255 eqe = q->qe[q->hba_index].eqe;
256
257 /* If the next EQE is not valid then we are done */
258 if (!bf_get_le32(lpfc_eqe_valid, eqe))
259 return NULL;
260 /* If the host has not yet processed the next entry then we are done */
261 idx = ((q->hba_index + 1) % q->entry_count);
262 if (idx == q->host_index)
263 return NULL;
264
265 q->hba_index = idx;
266 return eqe;
267 }
268
269 /**
270 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
271 * @q: The Event Queue to disable interrupts
272 *
273 **/
274 static inline void
275 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
276 {
277 struct lpfc_register doorbell;
278
279 doorbell.word0 = 0;
280 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
281 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
282 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
283 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
284 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
285 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
286 }
287
288 /**
289 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
290 * @q: The Event Queue that the host has completed processing for.
291 * @arm: Indicates whether the host wants to arm this EQ.
292 *
293 * This routine will mark all Event Queue Entries on @q, from the last
294 * known completed entry to the last entry that was processed, as completed
295 * by clearing the valid bit for each completion queue entry. Then it will
296 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
297 * The internal host index in the @q will be updated by this routine to indicate
298 * that the host has finished processing the entries. The @arm parameter
299 * indicates that the queue should be rearmed when ringing the doorbell.
300 *
301 * This function will return the number of EQEs that were popped.
302 **/
303 uint32_t
304 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
305 {
306 uint32_t released = 0;
307 struct lpfc_eqe *temp_eqe;
308 struct lpfc_register doorbell;
309
310 /* sanity check on queue memory */
311 if (unlikely(!q))
312 return 0;
313
314 /* while there are valid entries */
315 while (q->hba_index != q->host_index) {
316 temp_eqe = q->qe[q->host_index].eqe;
317 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
318 released++;
319 q->host_index = ((q->host_index + 1) % q->entry_count);
320 }
321 if (unlikely(released == 0 && !arm))
322 return 0;
323
324 /* ring doorbell for number popped */
325 doorbell.word0 = 0;
326 if (arm) {
327 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
328 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
329 }
330 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
331 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
332 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
333 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
334 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
335 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
336 /* PCI read to flush PCI pipeline on re-arming for INTx mode */
337 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
338 readl(q->phba->sli4_hba.EQCQDBregaddr);
339 return released;
340 }
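
/*
 * Illustrative sketch, not part of the driver source: the interrupt
 * handlers later in this file drain an EQ and then re-arm it with a loop
 * of roughly this shape.  The qidx argument is an assumption for the
 * example only.
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */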
341
342 /**
343 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
344 * @q: The Completion Queue to get the first valid CQE from
345 *
346 * This routine will get the first valid Completion Queue Entry from @q, update
347 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
348 * the Queue (no more work to do), or the Queue is full of CQEs that have been
349 * processed, but not popped back to the HBA then this routine will return NULL.
350 **/
351 static struct lpfc_cqe *
352 lpfc_sli4_cq_get(struct lpfc_queue *q)
353 {
354 struct lpfc_cqe *cqe;
355 uint32_t idx;
356
357 /* sanity check on queue memory */
358 if (unlikely(!q))
359 return NULL;
360
361 /* If the next CQE is not valid then we are done */
362 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
363 return NULL;
364 /* If the host has not yet processed the next entry then we are done */
365 idx = ((q->hba_index + 1) % q->entry_count);
366 if (idx == q->host_index)
367 return NULL;
368
369 cqe = q->qe[q->hba_index].cqe;
370 q->hba_index = idx;
371 return cqe;
372 }
373
374 /**
375 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
376 * @q: The Completion Queue that the host has completed processing for.
377 * @arm: Indicates whether the host wants to arm this CQ.
378 *
379 * This routine will mark all Completion queue entries on @q, from the last
380 * known completed entry to the last entry that was processed, as completed
381 * by clearing the valid bit for each completion queue entry. Then it will
382 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
383 * The internal host index in the @q will be updated by this routine to indicate
384 * that the host has finished processing the entries. The @arm parameter
385 * indicates that the queue should be rearmed when ringing the doorbell.
386 *
387 * This function will return the number of CQEs that were released.
388 **/
389 uint32_t
390 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
391 {
392 uint32_t released = 0;
393 struct lpfc_cqe *temp_qe;
394 struct lpfc_register doorbell;
395
396 /* sanity check on queue memory */
397 if (unlikely(!q))
398 return 0;
399 /* while there are valid entries */
400 while (q->hba_index != q->host_index) {
401 temp_qe = q->qe[q->host_index].cqe;
402 bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
403 released++;
404 q->host_index = ((q->host_index + 1) % q->entry_count);
405 }
406 if (unlikely(released == 0 && !arm))
407 return 0;
408
409 /* ring doorbell for number popped */
410 doorbell.word0 = 0;
411 if (arm)
412 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
413 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
414 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
415 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
416 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
417 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
418 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
419 return released;
420 }
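
/*
 * Illustrative sketch, not part of the driver source: a fast-path
 * completion queue is typically drained and then re-armed in one pass,
 * roughly as follows.
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL)
 *		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */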
421
422 /**
423 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
424 * @hq: The Header Receive Queue to operate on.
425 * @dq: The Data Receive Queue to operate on.
426 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
427 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
428 *
429 * This routine will copy @hrqe and @drqe to the next available entries on
430 * @hq and @dq, then ring the Receive Queue Doorbell to signal the HBA. It
431 * returns the put index on success, or -ENOMEM if no entries are available.
432 * The caller is expected to hold the hbalock when calling this routine.
433 **/
434 static int
435 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
436 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
437 {
438 struct lpfc_rqe *temp_hrqe;
439 struct lpfc_rqe *temp_drqe;
440 struct lpfc_register doorbell;
441 int put_index;
442
443 /* sanity check on queue memory */
444 if (unlikely(!hq) || unlikely(!dq))
445 return -ENOMEM;
446 put_index = hq->host_index;
447 temp_hrqe = hq->qe[hq->host_index].rqe;
448 temp_drqe = dq->qe[dq->host_index].rqe;
449
450 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
451 return -EINVAL;
452 if (hq->host_index != dq->host_index)
453 return -EINVAL;
454 /* If the host has not yet processed the next entry then we are done */
455 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
456 return -EBUSY;
457 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
458 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
459
460 /* Update the host index to point to the next slot */
461 hq->host_index = ((hq->host_index + 1) % hq->entry_count);
462 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
463
464 /* Ring The Header Receive Queue Doorbell */
465 if (!(hq->host_index % hq->entry_repost)) {
466 doorbell.word0 = 0;
467 if (hq->db_format == LPFC_DB_RING_FORMAT) {
468 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
469 hq->entry_repost);
470 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
471 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
472 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
473 hq->entry_repost);
474 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
475 hq->host_index);
476 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
477 } else {
478 return -EINVAL;
479 }
480 writel(doorbell.word0, hq->db_regaddr);
481 }
482 return put_index;
483 }
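
/*
 * Illustrative sketch, not part of the driver source: receive buffers are
 * posted as a header/data pair whose DMA addresses are loaded into the two
 * RQEs before calling lpfc_sli4_rq_put() under the hbalock.  The rqb buffer
 * naming below is an assumption for the example only.
 *
 *	hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 */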
484
485 /**
486 * lpfc_sli4_rq_release - Updates internal hba index for RQ
487 * @hq: The Header Receive Queue to operate on.
488 * @dq: The Data Receive Queue to operate on.
489 * This routine will update the HBA index of a queue to reflect consumption of
490 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
491 * consumed an entry the host calls this function to update the queue's
492 * internal pointers. This routine returns the number of entries that were
493 * consumed by the HBA.
494 **/
495 static uint32_t
496 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
497 {
498 /* sanity check on queue memory */
499 if (unlikely(!hq) || unlikely(!dq))
500 return 0;
501
502 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
503 return 0;
504 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
505 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
506 return 1;
507 }
508
509 /**
510 * lpfc_cmd_iocb - Get next command iocb entry in the ring
511 * @phba: Pointer to HBA context object.
512 * @pring: Pointer to driver SLI ring object.
513 *
514 * This function returns pointer to next command iocb entry
515 * in the command ring. The caller must hold hbalock to prevent
516 * other threads from consuming the next command iocb.
517 * SLI-2/SLI-3 provide different sized iocbs.
518 **/
519 static inline IOCB_t *
520 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
521 {
522 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
523 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
524 }
525
526 /**
527 * lpfc_resp_iocb - Get next response iocb entry in the ring
528 * @phba: Pointer to HBA context object.
529 * @pring: Pointer to driver SLI ring object.
530 *
531 * This function returns pointer to next response iocb entry
532 * in the response ring. The caller must hold hbalock to make sure
533 * that no other thread consumes the next response iocb.
534 * SLI-2/SLI-3 provide different sized iocbs.
535 **/
536 static inline IOCB_t *
537 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
538 {
539 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
540 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
541 }
542
543 /**
544 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
545 * @phba: Pointer to HBA context object.
546 *
547 * This function is called with hbalock held. This function
548 * allocates a new driver iocb object from the iocb pool. If the
549 * allocation is successful, it returns a pointer to the newly
550 * allocated iocb object; otherwise it returns NULL.
551 **/
552 struct lpfc_iocbq *
553 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
554 {
555 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
556 struct lpfc_iocbq * iocbq = NULL;
557
558 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
559 if (iocbq)
560 phba->iocb_cnt++;
561 if (phba->iocb_cnt > phba->iocb_max)
562 phba->iocb_max = phba->iocb_cnt;
563 return iocbq;
564 }
565
566 /**
567 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
568 * @phba: Pointer to HBA context object.
569 * @xritag: XRI value.
570 *
571 * This function clears the sglq pointer from the array of active
572 * sglq's. The xritag that is passed in is used to index into the
573 * array. Before the xritag can be used it needs to be adjusted
574 * by subtracting the xribase.
575 *
576 * Returns sglq pointer = success, NULL = Failure.
577 **/
578 static struct lpfc_sglq *
579 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
580 {
581 struct lpfc_sglq *sglq;
582
583 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
584 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
585 return sglq;
586 }
587
588 /**
589 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
590 * @phba: Pointer to HBA context object.
591 * @xritag: XRI value.
592 *
593 * This function returns the sglq pointer from the array of active
594 * sglq's. The xritag that is passed in is used to index into the
595 * array. Before the xritag can be used it needs to be adjusted
596 * by subtracting the xribase.
597 *
598 * Returns sglq pointer = success, NULL = Failure.
599 **/
600 struct lpfc_sglq *
601 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
602 {
603 struct lpfc_sglq *sglq;
604
605 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
606 return sglq;
607 }
608
609 /**
610 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
611 * @phba: Pointer to HBA context object.
612 * @xritag: xri used in this exchange.
613 * @rrq: The RRQ to be cleared.
614 *
615 **/
616 void
617 lpfc_clr_rrq_active(struct lpfc_hba *phba,
618 uint16_t xritag,
619 struct lpfc_node_rrq *rrq)
620 {
621 struct lpfc_nodelist *ndlp = NULL;
622
623 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
624 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
625
626 /* The target DID could have been swapped (cable swap)
627 * we should use the ndlp from the findnode if it is
628 * available.
629 */
630 if ((!ndlp) && rrq->ndlp)
631 ndlp = rrq->ndlp;
632
633 if (!ndlp)
634 goto out;
635
636 if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
637 rrq->send_rrq = 0;
638 rrq->xritag = 0;
639 rrq->rrq_stop_time = 0;
640 }
641 out:
642 mempool_free(rrq, phba->rrq_pool);
643 }
644
645 /**
646 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
647 * @phba: Pointer to HBA context object.
648 *
649 * This function checks whether stop_time (ratov from setting rrq
650 * active) has been reached for each RRQ on the active list. If it
651 * has and the send_rrq flag is set, it will call lpfc_send_rrq. If
652 * the send_rrq flag is not set then it will just call the routine
653 * to clear the rrq and free the rrq resource.
654 * Before leaving the routine, the timer is set to the next rrq
655 * that is going to expire.
657 *
658 **/
659 void
660 lpfc_handle_rrq_active(struct lpfc_hba *phba)
661 {
662 struct lpfc_node_rrq *rrq;
663 struct lpfc_node_rrq *nextrrq;
664 unsigned long next_time;
665 unsigned long iflags;
666 LIST_HEAD(send_rrq);
667
668 spin_lock_irqsave(&phba->hbalock, iflags);
669 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
670 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
671 list_for_each_entry_safe(rrq, nextrrq,
672 &phba->active_rrq_list, list) {
673 if (time_after(jiffies, rrq->rrq_stop_time))
674 list_move(&rrq->list, &send_rrq);
675 else if (time_before(rrq->rrq_stop_time, next_time))
676 next_time = rrq->rrq_stop_time;
677 }
678 spin_unlock_irqrestore(&phba->hbalock, iflags);
679 if (!list_empty(&phba->active_rrq_list))
680 mod_timer(&phba->rrq_tmr, next_time);
681 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
682 list_del(&rrq->list);
683 if (!rrq->send_rrq)
684 /* this call will free the rrq */
685 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
686 else if (lpfc_send_rrq(phba, rrq)) {
687 /* if we send the rrq then the completion handler
688 * will clear the bit in the xribitmap.
689 */
690 lpfc_clr_rrq_active(phba, rrq->xritag,
691 rrq);
692 }
693 }
694 }
695
696 /**
697 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
698 * @vport: Pointer to vport context object.
699 * @xri: The xri used in the exchange.
700 * @did: The target's DID for this exchange.
701 *
702 * returns NULL = rrq not found in the phba->active_rrq_list.
703 * rrq = rrq for this xri and target.
704 **/
705 struct lpfc_node_rrq *
706 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
707 {
708 struct lpfc_hba *phba = vport->phba;
709 struct lpfc_node_rrq *rrq;
710 struct lpfc_node_rrq *nextrrq;
711 unsigned long iflags;
712
713 if (phba->sli_rev != LPFC_SLI_REV4)
714 return NULL;
715 spin_lock_irqsave(&phba->hbalock, iflags);
716 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
717 if (rrq->vport == vport && rrq->xritag == xri &&
718 rrq->nlp_DID == did){
719 list_del(&rrq->list);
720 spin_unlock_irqrestore(&phba->hbalock, iflags);
721 return rrq;
722 }
723 }
724 spin_unlock_irqrestore(&phba->hbalock, iflags);
725 return NULL;
726 }
727
728 /**
729 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
730 * @vport: Pointer to vport context object.
731 * @ndlp: Pointer to the lpfc_nodelist structure.
732 * If ndlp is NULL, remove all active RRQs for this vport from the
733 * phba->active_rrq_list and clear the rrq.
734 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
735 **/
736 void
737 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
738
739 {
740 struct lpfc_hba *phba = vport->phba;
741 struct lpfc_node_rrq *rrq;
742 struct lpfc_node_rrq *nextrrq;
743 unsigned long iflags;
744 LIST_HEAD(rrq_list);
745
746 if (phba->sli_rev != LPFC_SLI_REV4)
747 return;
748 if (!ndlp) {
749 lpfc_sli4_vport_delete_els_xri_aborted(vport);
750 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
751 }
752 spin_lock_irqsave(&phba->hbalock, iflags);
753 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
754 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
755 list_move(&rrq->list, &rrq_list);
756 spin_unlock_irqrestore(&phba->hbalock, iflags);
757
758 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
759 list_del(&rrq->list);
760 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
761 }
762 }
763
764 /**
765 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
766 * @phba: Pointer to HBA context object.
767 *
768 * Remove all rrqs from the phba->active_rrq_list and free them by
769 * calling lpfc_clr_rrq_active.
770 *
771 **/
772 void
773 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
774 {
775 struct lpfc_node_rrq *rrq;
776 struct lpfc_node_rrq *nextrrq;
777 unsigned long next_time;
778 unsigned long iflags;
779 LIST_HEAD(rrq_list);
780
781 if (phba->sli_rev != LPFC_SLI_REV4)
782 return;
783 spin_lock_irqsave(&phba->hbalock, iflags);
784 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
785 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
786 list_splice_init(&phba->active_rrq_list, &rrq_list);
787 spin_unlock_irqrestore(&phba->hbalock, iflags);
788
789 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
790 list_del(&rrq->list);
791 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
792 }
793 if (!list_empty(&phba->active_rrq_list))
794 mod_timer(&phba->rrq_tmr, next_time);
795 }
796
797
798 /**
799 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
800 * @phba: Pointer to HBA context object.
801 * @ndlp: Targets nodelist pointer for this exchange.
802 * @xritag: the xri in the bitmap to test.
803 *
804 * This function is called with hbalock held. This function
805 * returns 0 = rrq not active for this xri
806 * 1 = rrq is valid for this xri.
807 **/
808 int
809 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
810 uint16_t xritag)
811 {
812 if (!ndlp)
813 return 0;
814 if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
815 return 1;
816 else
817 return 0;
818 }
819
820 /**
821 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
822 * @phba: Pointer to HBA context object.
823 * @ndlp: nodelist pointer for this target.
824 * @xritag: xri used in this exchange.
825 * @rxid: Remote Exchange ID.
826 * @send_rrq: Flag used to determine if we should send rrq els cmd.
827 *
828 * This function takes the hbalock.
829 * The active bit is always set in the active rrq xri_bitmap even
830 * if there is no slot available for the other rrq information.
831 *
832 * returns 0 rrq activated for this xri
833 * < 0 No memory or invalid ndlp.
834 **/
835 int
836 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
837 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
838 {
839 unsigned long iflags;
840 struct lpfc_node_rrq *rrq;
841 int empty;
842
843 if (!ndlp)
844 return -EINVAL;
845
846 if (!phba->cfg_enable_rrq)
847 return -EINVAL;
848
849 spin_lock_irqsave(&phba->hbalock, iflags);
850 if (phba->pport->load_flag & FC_UNLOADING) {
851 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
852 goto out;
853 }
854
855 /*
856 * set the active bit even if there is no mem available.
857 */
858 if (NLP_CHK_FREE_REQ(ndlp))
859 goto out;
860
861 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
862 goto out;
863
864 if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
865 goto out;
866
867 spin_unlock_irqrestore(&phba->hbalock, iflags);
868 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
869 if (!rrq) {
870 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
871 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
872 " DID:0x%x Send:%d\n",
873 xritag, rxid, ndlp->nlp_DID, send_rrq);
874 return -EINVAL;
875 }
876 if (phba->cfg_enable_rrq == 1)
877 rrq->send_rrq = send_rrq;
878 else
879 rrq->send_rrq = 0;
880 rrq->xritag = xritag;
881 rrq->rrq_stop_time = jiffies +
882 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
883 rrq->ndlp = ndlp;
884 rrq->nlp_DID = ndlp->nlp_DID;
885 rrq->vport = ndlp->vport;
886 rrq->rxid = rxid;
887 spin_lock_irqsave(&phba->hbalock, iflags);
888 empty = list_empty(&phba->active_rrq_list);
889 list_add_tail(&rrq->list, &phba->active_rrq_list);
890 phba->hba_flag |= HBA_RRQ_ACTIVE;
891 if (empty)
892 lpfc_worker_wake_up(phba);
893 spin_unlock_irqrestore(&phba->hbalock, iflags);
894 return 0;
895 out:
896 spin_unlock_irqrestore(&phba->hbalock, iflags);
897 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
898 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
899 " DID:0x%x Send:%d\n",
900 xritag, rxid, ndlp->nlp_DID, send_rrq);
901 return -EINVAL;
902 }
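
/*
 * Illustrative sketch, not part of the driver source: when an exchange
 * completes with the XB (exchange busy) bit set, the completion path marks
 * the XRI so it is not reused until the RRQ resolves it.  The
 * exchange_busy flag name is illustrative only.
 *
 *	if (exchange_busy)
 *		lpfc_set_rrq_active(phba, ndlp,
 *				    lpfc_cmd->cur_iocbq.sli4_lxritag, 0, 0);
 */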
903
904 /**
905 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
906 * @phba: Pointer to HBA context object.
907 * @piocb: Pointer to the iocbq.
908 *
909 * This function is called with hbalock held. This function
910 * gets a new driver sglq object from the sglq list. If the
911 * list is not empty, it returns a pointer to the newly
912 * allocated sglq object; otherwise it returns NULL.
913 **/
914 static struct lpfc_sglq *
915 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
916 {
917 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
918 struct lpfc_sglq *sglq = NULL;
919 struct lpfc_sglq *start_sglq = NULL;
920 struct lpfc_scsi_buf *lpfc_cmd;
921 struct lpfc_nodelist *ndlp;
922 int found = 0;
923
924 if (piocbq->iocb_flag & LPFC_IO_FCP) {
925 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
926 ndlp = lpfc_cmd->rdata->pnode;
927 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
928 !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
929 ndlp = piocbq->context_un.ndlp;
930 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
931 ndlp = piocbq->context_un.ndlp;
932 else
933 ndlp = piocbq->context1;
934
935 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
936 start_sglq = sglq;
937 while (!found) {
938 if (!sglq)
939 return NULL;
940 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
941 /* This xri has an rrq outstanding for this DID.
942 * put it back in the list and get another xri.
943 */
944 list_add_tail(&sglq->list, lpfc_sgl_list);
945 sglq = NULL;
946 list_remove_head(lpfc_sgl_list, sglq,
947 struct lpfc_sglq, list);
948 if (sglq == start_sglq) {
949 sglq = NULL;
950 break;
951 } else
952 continue;
953 }
954 sglq->ndlp = ndlp;
955 found = 1;
956 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
957 sglq->state = SGL_ALLOCATED;
958 }
959 return sglq;
960 }
961
962 /**
963 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
964 * @phba: Pointer to HBA context object.
965 *
966 * This function is called with no lock held. This function
967 * allocates a new driver iocb object from the iocb pool. If the
968 * allocation is successful, it returns a pointer to the newly
969 * allocated iocb object; otherwise it returns NULL.
970 **/
971 struct lpfc_iocbq *
972 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
973 {
974 struct lpfc_iocbq * iocbq = NULL;
975 unsigned long iflags;
976
977 spin_lock_irqsave(&phba->hbalock, iflags);
978 iocbq = __lpfc_sli_get_iocbq(phba);
979 spin_unlock_irqrestore(&phba->hbalock, iflags);
980 return iocbq;
981 }
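
/*
 * Illustrative sketch, not part of the driver source: callers that build a
 * command from scratch typically pair lpfc_sli_get_iocbq() with
 * lpfc_sli_release_iocbq() on the error path, for example:
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	(fill in iocbq->iocb and issue it)
 *	if (issue_failed)
 *		lpfc_sli_release_iocbq(phba, iocbq);
 */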
982
983 /**
984 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
985 * @phba: Pointer to HBA context object.
986 * @iocbq: Pointer to driver iocb object.
987 *
988 * This function is called with hbalock held to release driver
989 * iocb object to the iocb pool. The iotag in the iocb object
990 * does not change for each use of the iocb object. This function
991 * clears all other fields of the iocb object when it is freed.
992 * The sglq structure that holds the xritag and phys and virtual
993 * mappings for the scatter gather list is retrieved from the
994 * active array of sglq. The get of the sglq pointer also clears
995 * the entry in the array. If the status of the IO indicates that
996 * this IO was aborted then the sglq entry is put on the
997 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
998 * IO has good status or fails for any other reason then the sglq
999 * entry is added to the free list (lpfc_sgl_list).
1000 **/
1001 static void
1002 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1003 {
1004 struct lpfc_sglq *sglq;
1005 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1006 unsigned long iflag = 0;
1007 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
1008
1009 if (iocbq->sli4_xritag == NO_XRI)
1010 sglq = NULL;
1011 else
1012 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1013
1014 /*
1015 ** This should have been removed from the txcmplq before calling
1016 ** iocbq_release. The normal completion
1017 ** path should have already done the list_del_init.
1018 */
1019 if (unlikely(!list_empty(&iocbq->list))) {
1020 if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
1021 iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
1022 list_del_init(&iocbq->list);
1023 }
1024
1025
1026 if (sglq) {
1027 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1028 (sglq->state != SGL_XRI_ABORTED)) {
1029 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
1030 iflag);
1031 list_add(&sglq->list,
1032 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1033 spin_unlock_irqrestore(
1034 &phba->sli4_hba.abts_sgl_list_lock, iflag);
1035 } else {
1036 sglq->state = SGL_FREED;
1037 sglq->ndlp = NULL;
1038 list_add_tail(&sglq->list,
1039 &phba->sli4_hba.lpfc_sgl_list);
1040
1041 /* Check if TXQ queue needs to be serviced */
1042 if (!list_empty(&pring->txq))
1043 lpfc_worker_wake_up(phba);
1044 }
1045 }
1046
1047
1048 /*
1049 * Clean all volatile data fields, preserve iotag and node struct.
1050 */
1051 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1052 iocbq->sli4_lxritag = NO_XRI;
1053 iocbq->sli4_xritag = NO_XRI;
1054 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1055 }
1056
1057
1058 /**
1059 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1060 * @phba: Pointer to HBA context object.
1061 * @iocbq: Pointer to driver iocb object.
1062 *
1063 * This function is called with hbalock held to release driver
1064 * iocb object to the iocb pool. The iotag in the iocb object
1065 * does not change for each use of the iocb object. This function
1066 * clears all other fields of the iocb object when it is freed.
1067 **/
1068 static void
1069 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1070 {
1071 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1072
1073 /*
1074 ** This should have been removed from the txcmplq before calling
1075 ** iocbq_release. The normal completion
1076 ** path should have already done the list_del_init.
1077 */
1078 if (unlikely(!list_empty(&iocbq->list)))
1079 list_del_init(&iocbq->list);
1080
1081 /*
1082 * Clean all volatile data fields, preserve iotag and node struct.
1083 */
1084 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1085 iocbq->sli4_xritag = NO_XRI;
1086 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1087 }
1088
1089 /**
1090 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1091 * @phba: Pointer to HBA context object.
1092 * @iocbq: Pointer to driver iocb object.
1093 *
1094 * This function is called with hbalock held to release driver
1095 * iocb object to the iocb pool. The iotag in the iocb object
1096 * does not change for each use of the iocb object. This function
1097 * clears all other fields of the iocb object when it is freed.
1098 **/
1099 static void
1100 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1101 {
1102 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1103 phba->iocb_cnt--;
1104 }
1105
1106 /**
1107 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1108 * @phba: Pointer to HBA context object.
1109 * @iocbq: Pointer to driver iocb object.
1110 *
1111 * This function is called with no lock held to release the iocb to
1112 * iocb pool.
1113 **/
1114 void
1115 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1116 {
1117 unsigned long iflags;
1118
1119 /*
1120 * Clean all volatile data fields, preserve iotag and node struct.
1121 */
1122 spin_lock_irqsave(&phba->hbalock, iflags);
1123 __lpfc_sli_release_iocbq(phba, iocbq);
1124 spin_unlock_irqrestore(&phba->hbalock, iflags);
1125 }
1126
1127 /**
1128 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1129 * @phba: Pointer to HBA context object.
1130 * @iocblist: List of IOCBs.
1131 * @ulpstatus: ULP status in IOCB command field.
1132 * @ulpWord4: ULP word-4 in IOCB command field.
1133 *
1134 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1135 * on the list by invoking the complete callback function associated with the
1136 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
1137 * fields.
1138 **/
1139 void
1140 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1141 uint32_t ulpstatus, uint32_t ulpWord4)
1142 {
1143 struct lpfc_iocbq *piocb;
1144
1145 while (!list_empty(iocblist)) {
1146 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1147 if (!piocb->iocb_cmpl)
1148 lpfc_sli_release_iocbq(phba, piocb);
1149 else {
1150 piocb->iocb.ulpStatus = ulpstatus;
1151 piocb->iocb.un.ulpWord[4] = ulpWord4;
1152 (piocb->iocb_cmpl) (phba, piocb, piocb);
1153 }
1154 }
1155 return;
1156 }
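
/*
 * Illustrative sketch, not part of the driver source: during an abort or
 * link-down flush the driver moves pending IOCBs onto a local list and
 * completes them all with one call, typically with a local-reject status:
 *
 *	LIST_HEAD(completions);
 *	(splice iocbs from the txq/txcmplq onto &completions)
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */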
1157
1158 /**
1159 * lpfc_sli_iocb_cmd_type - Get the iocb type
1160 * @iocb_cmnd: iocb command code.
1161 *
1162 * This function is called by ring event handler function to get the iocb type.
1163 * This function translates the iocb command to an iocb command type used to
1164 * decide the final disposition of each completed IOCB.
1165 * The function returns
1166 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1167 * LPFC_SOL_IOCB if it is a solicited iocb completion
1168 * LPFC_ABORT_IOCB if it is an abort iocb
1169 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
1170 *
1171 * The caller is not required to hold any lock.
1172 **/
1173 static lpfc_iocb_type
1174 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1175 {
1176 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1177
1178 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1179 return 0;
1180
1181 switch (iocb_cmnd) {
1182 case CMD_XMIT_SEQUENCE_CR:
1183 case CMD_XMIT_SEQUENCE_CX:
1184 case CMD_XMIT_BCAST_CN:
1185 case CMD_XMIT_BCAST_CX:
1186 case CMD_ELS_REQUEST_CR:
1187 case CMD_ELS_REQUEST_CX:
1188 case CMD_CREATE_XRI_CR:
1189 case CMD_CREATE_XRI_CX:
1190 case CMD_GET_RPI_CN:
1191 case CMD_XMIT_ELS_RSP_CX:
1192 case CMD_GET_RPI_CR:
1193 case CMD_FCP_IWRITE_CR:
1194 case CMD_FCP_IWRITE_CX:
1195 case CMD_FCP_IREAD_CR:
1196 case CMD_FCP_IREAD_CX:
1197 case CMD_FCP_ICMND_CR:
1198 case CMD_FCP_ICMND_CX:
1199 case CMD_FCP_TSEND_CX:
1200 case CMD_FCP_TRSP_CX:
1201 case CMD_FCP_TRECEIVE_CX:
1202 case CMD_FCP_AUTO_TRSP_CX:
1203 case CMD_ADAPTER_MSG:
1204 case CMD_ADAPTER_DUMP:
1205 case CMD_XMIT_SEQUENCE64_CR:
1206 case CMD_XMIT_SEQUENCE64_CX:
1207 case CMD_XMIT_BCAST64_CN:
1208 case CMD_XMIT_BCAST64_CX:
1209 case CMD_ELS_REQUEST64_CR:
1210 case CMD_ELS_REQUEST64_CX:
1211 case CMD_FCP_IWRITE64_CR:
1212 case CMD_FCP_IWRITE64_CX:
1213 case CMD_FCP_IREAD64_CR:
1214 case CMD_FCP_IREAD64_CX:
1215 case CMD_FCP_ICMND64_CR:
1216 case CMD_FCP_ICMND64_CX:
1217 case CMD_FCP_TSEND64_CX:
1218 case CMD_FCP_TRSP64_CX:
1219 case CMD_FCP_TRECEIVE64_CX:
1220 case CMD_GEN_REQUEST64_CR:
1221 case CMD_GEN_REQUEST64_CX:
1222 case CMD_XMIT_ELS_RSP64_CX:
1223 case DSSCMD_IWRITE64_CR:
1224 case DSSCMD_IWRITE64_CX:
1225 case DSSCMD_IREAD64_CR:
1226 case DSSCMD_IREAD64_CX:
1227 type = LPFC_SOL_IOCB;
1228 break;
1229 case CMD_ABORT_XRI_CN:
1230 case CMD_ABORT_XRI_CX:
1231 case CMD_CLOSE_XRI_CN:
1232 case CMD_CLOSE_XRI_CX:
1233 case CMD_XRI_ABORTED_CX:
1234 case CMD_ABORT_MXRI64_CN:
1235 case CMD_XMIT_BLS_RSP64_CX:
1236 type = LPFC_ABORT_IOCB;
1237 break;
1238 case CMD_RCV_SEQUENCE_CX:
1239 case CMD_RCV_ELS_REQ_CX:
1240 case CMD_RCV_SEQUENCE64_CX:
1241 case CMD_RCV_ELS_REQ64_CX:
1242 case CMD_ASYNC_STATUS:
1243 case CMD_IOCB_RCV_SEQ64_CX:
1244 case CMD_IOCB_RCV_ELS64_CX:
1245 case CMD_IOCB_RCV_CONT64_CX:
1246 case CMD_IOCB_RET_XRI64_CX:
1247 type = LPFC_UNSOL_IOCB;
1248 break;
1249 case CMD_IOCB_XMIT_MSEQ64_CR:
1250 case CMD_IOCB_XMIT_MSEQ64_CX:
1251 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1252 case CMD_IOCB_RCV_ELS_LIST64_CX:
1253 case CMD_IOCB_CLOSE_EXTENDED_CN:
1254 case CMD_IOCB_ABORT_EXTENDED_CN:
1255 case CMD_IOCB_RET_HBQE64_CN:
1256 case CMD_IOCB_FCP_IBIDIR64_CR:
1257 case CMD_IOCB_FCP_IBIDIR64_CX:
1258 case CMD_IOCB_FCP_ITASKMGT64_CX:
1259 case CMD_IOCB_LOGENTRY_CN:
1260 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1261 printk("%s - Unhandled SLI-3 Command x%x\n",
1262 __func__, iocb_cmnd);
1263 type = LPFC_UNKNOWN_IOCB;
1264 break;
1265 default:
1266 type = LPFC_UNKNOWN_IOCB;
1267 break;
1268 }
1269
1270 return type;
1271 }
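
/*
 * Illustrative sketch, not part of the driver source: the ring event
 * handlers mask the response command word before classifying it, roughly:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:
 *		(look up the originating iocbq and call its iocb_cmpl)
 *		break;
 *	case LPFC_UNSOL_IOCB:
 *		(hand the sequence to the registered unsolicited handler)
 *		break;
 *	default:
 *		break;
 *	}
 */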
1272
1273 /**
1274 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1275 * @phba: Pointer to HBA context object.
1276 *
1277 * This function is called from SLI initialization code
1278 * to configure every ring of the HBA's SLI interface. The
1279 * caller is not required to hold any lock. This function issues
1280 * a config_ring mailbox command for each ring.
1281 * This function returns zero if successful else returns a negative
1282 * error code.
1283 **/
1284 static int
1285 lpfc_sli_ring_map(struct lpfc_hba *phba)
1286 {
1287 struct lpfc_sli *psli = &phba->sli;
1288 LPFC_MBOXQ_t *pmb;
1289 MAILBOX_t *pmbox;
1290 int i, rc, ret = 0;
1291
1292 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1293 if (!pmb)
1294 return -ENOMEM;
1295 pmbox = &pmb->u.mb;
1296 phba->link_state = LPFC_INIT_MBX_CMDS;
1297 for (i = 0; i < psli->num_rings; i++) {
1298 lpfc_config_ring(phba, i, pmb);
1299 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1300 if (rc != MBX_SUCCESS) {
1301 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1302 "0446 Adapter failed to init (%d), "
1303 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1304 "ring %d\n",
1305 rc, pmbox->mbxCommand,
1306 pmbox->mbxStatus, i);
1307 phba->link_state = LPFC_HBA_ERROR;
1308 ret = -ENXIO;
1309 break;
1310 }
1311 }
1312 mempool_free(pmb, phba->mbox_mem_pool);
1313 return ret;
1314 }
1315
1316 /**
1317 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1318 * @phba: Pointer to HBA context object.
1319 * @pring: Pointer to driver SLI ring object.
1320 * @piocb: Pointer to the driver iocb object.
1321 *
1322 * This function is called with hbalock held. The function adds the
1323 * new iocb to txcmplq of the given ring. This function always returns
1324 * 0. If this function is called for ELS ring, this function checks if
1325 * there is a vport associated with the ELS command. This function also
1326 * starts els_tmofunc timer if this is an ELS command.
1327 **/
1328 static int
1329 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1330 struct lpfc_iocbq *piocb)
1331 {
1332 list_add_tail(&piocb->list, &pring->txcmplq);
1333 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1334
1335 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1336 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1337 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1338 if (!piocb->vport)
1339 BUG();
1340 else
1341 mod_timer(&piocb->vport->els_tmofunc,
1342 jiffies +
1343 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1344 }
1345
1346
1347 return 0;
1348 }
1349
1350 /**
1351 * lpfc_sli_ringtx_get - Get first element of the txq
1352 * @phba: Pointer to HBA context object.
1353 * @pring: Pointer to driver SLI ring object.
1354 *
1355 * This function is called with hbalock held to get next
1356 * iocb in txq of the given ring. If there is any iocb in
1357 * the txq, the function returns first iocb in the list after
1358 * removing the iocb from the list, else it returns NULL.
1359 **/
1360 struct lpfc_iocbq *
1361 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1362 {
1363 struct lpfc_iocbq *cmd_iocb;
1364
1365 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1366 return cmd_iocb;
1367 }
1368
1369 /**
1370 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1371 * @phba: Pointer to HBA context object.
1372 * @pring: Pointer to driver SLI ring object.
1373 *
1374 * This function is called with hbalock held and the caller must post the
1375 * iocb without releasing the lock. If the caller releases the lock,
1376 * iocb slot returned by the function is not guaranteed to be available.
1377 * The function returns pointer to the next available iocb slot if there
1378 * is available slot in the ring, else it returns NULL.
1379 * If the get index of the ring is ahead of the put index, the function
1380 * will post an error attention event to the worker thread to take the
1381 * HBA to offline state.
1382 **/
1383 static IOCB_t *
1384 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1385 {
1386 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1387 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1388 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1389 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1390 pring->sli.sli3.next_cmdidx = 0;
1391
1392 if (unlikely(pring->sli.sli3.local_getidx ==
1393 pring->sli.sli3.next_cmdidx)) {
1394
1395 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1396
1397 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1398 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1399 "0315 Ring %d issue: portCmdGet %d "
1400 "is bigger than cmd ring %d\n",
1401 pring->ringno,
1402 pring->sli.sli3.local_getidx,
1403 max_cmd_idx);
1404
1405 phba->link_state = LPFC_HBA_ERROR;
1406 /*
1407 * All error attention handlers are posted to
1408 * worker thread
1409 */
1410 phba->work_ha |= HA_ERATT;
1411 phba->work_hs = HS_FFER3;
1412
1413 lpfc_worker_wake_up(phba);
1414
1415 return NULL;
1416 }
1417
1418 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1419 return NULL;
1420 }
1421
1422 return lpfc_cmd_iocb(phba, pring);
1423 }
1424
1425 /**
1426 * lpfc_sli_next_iotag - Get an iotag for the iocb
1427 * @phba: Pointer to HBA context object.
1428 * @iocbq: Pointer to driver iocb object.
1429 *
1430 * This function gets an iotag for the iocb. If there is no unused iotag and
1431 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1432 * array and assigns a new iotag.
1433 * The function returns the allocated iotag if successful, else returns zero.
1434 * Zero is not a valid iotag.
1435 * The caller is not required to hold any lock.
1436 **/
1437 uint16_t
1438 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1439 {
1440 struct lpfc_iocbq **new_arr;
1441 struct lpfc_iocbq **old_arr;
1442 size_t new_len;
1443 struct lpfc_sli *psli = &phba->sli;
1444 uint16_t iotag;
1445
1446 spin_lock_irq(&phba->hbalock);
1447 iotag = psli->last_iotag;
1448 if(++iotag < psli->iocbq_lookup_len) {
1449 psli->last_iotag = iotag;
1450 psli->iocbq_lookup[iotag] = iocbq;
1451 spin_unlock_irq(&phba->hbalock);
1452 iocbq->iotag = iotag;
1453 return iotag;
1454 } else if (psli->iocbq_lookup_len < (0xffff
1455 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1456 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1457 spin_unlock_irq(&phba->hbalock);
1458 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1459 GFP_KERNEL);
1460 if (new_arr) {
1461 spin_lock_irq(&phba->hbalock);
1462 old_arr = psli->iocbq_lookup;
1463 if (new_len <= psli->iocbq_lookup_len) {
1464 /* highly improbable case */
1465 kfree(new_arr);
1466 iotag = psli->last_iotag;
1467 if(++iotag < psli->iocbq_lookup_len) {
1468 psli->last_iotag = iotag;
1469 psli->iocbq_lookup[iotag] = iocbq;
1470 spin_unlock_irq(&phba->hbalock);
1471 iocbq->iotag = iotag;
1472 return iotag;
1473 }
1474 spin_unlock_irq(&phba->hbalock);
1475 return 0;
1476 }
1477 if (psli->iocbq_lookup)
1478 memcpy(new_arr, old_arr,
1479 ((psli->last_iotag + 1) *
1480 sizeof (struct lpfc_iocbq *)));
1481 psli->iocbq_lookup = new_arr;
1482 psli->iocbq_lookup_len = new_len;
1483 psli->last_iotag = iotag;
1484 psli->iocbq_lookup[iotag] = iocbq;
1485 spin_unlock_irq(&phba->hbalock);
1486 iocbq->iotag = iotag;
1487 kfree(old_arr);
1488 return iotag;
1489 }
1490 } else
1491 spin_unlock_irq(&phba->hbalock);
1492
1493 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1494 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1495 psli->last_iotag);
1496
1497 return 0;
1498 }
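
/*
 * Illustrative sketch, not part of the driver source: when the iocbq pool
 * is built at initialization time, each new iocbq is given a persistent
 * iotag before being added to the free list, roughly:
 *
 *	iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
 *	iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
 *	if (iotag == 0)
 *		kfree(iocbq_entry);	(could not assign an iotag)
 *	else
 *		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
 */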
1499
1500 /**
1501 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1502 * @phba: Pointer to HBA context object.
1503 * @pring: Pointer to driver SLI ring object.
1504 * @iocb: Pointer to iocb slot in the ring.
1505 * @nextiocb: Pointer to driver iocb object which needs to be
1506 * posted to firmware.
1507 *
1508 * This function is called with hbalock held to post a new iocb to
1509 * the firmware. This function copies the new iocb to ring iocb slot and
1510 * updates the ring pointers. It adds the new iocb to txcmplq if there is
1511 * a completion call back for this iocb else the function will free the
1512 * iocb object.
1513 **/
1514 static void
1515 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1516 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1517 {
1518 /*
1519 * Set up an iotag
1520 */
1521 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1522
1523
1524 if (pring->ringno == LPFC_ELS_RING) {
1525 lpfc_debugfs_slow_ring_trc(phba,
1526 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1527 *(((uint32_t *) &nextiocb->iocb) + 4),
1528 *(((uint32_t *) &nextiocb->iocb) + 6),
1529 *(((uint32_t *) &nextiocb->iocb) + 7));
1530 }
1531
1532 /*
1533 * Issue iocb command to adapter
1534 */
1535 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1536 wmb();
1537 pring->stats.iocb_cmd++;
1538
1539 /*
1540 * If there is no completion routine to call, we can release the
1541 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1542 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1543 */
1544 if (nextiocb->iocb_cmpl)
1545 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1546 else
1547 __lpfc_sli_release_iocbq(phba, nextiocb);
1548
1549 /*
1550 * Let the HBA know what IOCB slot will be the next one the
1551 * driver will put a command into.
1552 */
1553 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1554 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1555 }
1556
1557 /**
1558 * lpfc_sli_update_full_ring - Update the chip attention register
1559 * @phba: Pointer to HBA context object.
1560 * @pring: Pointer to driver SLI ring object.
1561 *
1562 * The caller is not required to hold any lock for calling this function.
1563 * This function updates the chip attention bits for the ring to inform firmware
1564 * that there is pending work to be done for this ring and requests an
1565 * interrupt when there is space available in the ring. This function is
1566 * called when the driver is unable to post more iocbs to the ring due
1567 * to unavailability of space in the ring.
1568 **/
1569 static void
1570 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1571 {
1572 int ringno = pring->ringno;
1573
1574 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1575
1576 wmb();
1577
1578 /*
1579 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1580 * The HBA will tell us when an IOCB entry is available.
1581 */
1582 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1583 readl(phba->CAregaddr); /* flush */
1584
1585 pring->stats.iocb_cmd_full++;
1586 }
1587
1588 /**
1589 * lpfc_sli_update_ring - Update chip attention register
1590 * @phba: Pointer to HBA context object.
1591 * @pring: Pointer to driver SLI ring object.
1592 *
1593 * This function updates the chip attention register bit for the
1594 * given ring to inform HBA that there is more work to be done
1595 * in this ring. The caller is not required to hold any lock.
1596 **/
1597 static void
 1598 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1599 {
1600 int ringno = pring->ringno;
1601
1602 /*
1603 * Tell the HBA that there is work to do in this ring.
1604 */
1605 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1606 wmb();
1607 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1608 readl(phba->CAregaddr); /* flush */
1609 }
1610 }
1611
1612 /**
1613 * lpfc_sli_resume_iocb - Process iocbs in the txq
1614 * @phba: Pointer to HBA context object.
1615 * @pring: Pointer to driver SLI ring object.
1616 *
1617 * This function is called with hbalock held to post pending iocbs
 1618  * in the txq to the firmware. This function is called when the driver
1619 * detects space available in the ring.
1620 **/
1621 static void
 1622 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1623 {
1624 IOCB_t *iocb;
1625 struct lpfc_iocbq *nextiocb;
1626
1627 /*
1628 * Check to see if:
1629 * (a) there is anything on the txq to send
1630 * (b) link is up
1631 * (c) link attention events can be processed (fcp ring only)
1632 * (d) IOCB processing is not blocked by the outstanding mbox command.
1633 */
1634
1635 if (lpfc_is_link_up(phba) &&
1636 (!list_empty(&pring->txq)) &&
1637 (pring->ringno != phba->sli.fcp_ring ||
1638 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1639
1640 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1641 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1642 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1643
1644 if (iocb)
1645 lpfc_sli_update_ring(phba, pring);
1646 else
1647 lpfc_sli_update_full_ring(phba, pring);
1648 }
1649
1650 return;
1651 }
1652
1653 /**
1654 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1655 * @phba: Pointer to HBA context object.
1656 * @hbqno: HBQ number.
1657 *
1658 * This function is called with hbalock held to get the next
 1659  * available slot for the given HBQ. If there is a free slot
 1660  * available for the HBQ it will return a pointer to the next available
 1661  * HBQ entry, otherwise it will return NULL.
1662 **/
1663 static struct lpfc_hbq_entry *
 1664 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1665 {
1666 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1667
1668 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1669 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1670 hbqp->next_hbqPutIdx = 0;
1671
1672 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1673 uint32_t raw_index = phba->hbq_get[hbqno];
1674 uint32_t getidx = le32_to_cpu(raw_index);
1675
1676 hbqp->local_hbqGetIdx = getidx;
1677
1678 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1679 lpfc_printf_log(phba, KERN_ERR,
1680 LOG_SLI | LOG_VPORT,
1681 "1802 HBQ %d: local_hbqGetIdx "
1682 "%u is > than hbqp->entry_count %u\n",
1683 hbqno, hbqp->local_hbqGetIdx,
1684 hbqp->entry_count);
1685
1686 phba->link_state = LPFC_HBA_ERROR;
1687 return NULL;
1688 }
1689
1690 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1691 return NULL;
1692 }
1693
1694 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1695 hbqp->hbqPutIdx;
1696 }
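
/*
 * Editor's sketch (not part of the upstream driver): the HBQ indices above
 * behave like a classic circular buffer in which one slot is kept empty so
 * that "put == get" means the queue is empty. A minimal model of the "full"
 * test performed by lpfc_sli_next_hbq_slot(), assuming the same index
 * semantics; the helper name is hypothetical.
 */
static inline bool lpfc_example_hbq_full(uint32_t put_idx, uint32_t get_idx,
					 uint32_t entry_count)
{
	/* Full when advancing the put index would collide with the get index. */
	return ((put_idx + 1) % entry_count) == get_idx;
}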
1697
1698 /**
1699 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1700 * @phba: Pointer to HBA context object.
1701 *
1702 * This function is called with no lock held to free all the
1703 * hbq buffers while uninitializing the SLI interface. It also
1704 * frees the HBQ buffers returned by the firmware but not yet
1705 * processed by the upper layers.
1706 **/
1707 void
 1708 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1709 {
1710 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1711 struct hbq_dmabuf *hbq_buf;
1712 unsigned long flags;
1713 int i, hbq_count;
1714 uint32_t hbqno;
1715
1716 hbq_count = lpfc_sli_hbq_count();
1717 /* Return all memory used by all HBQs */
1718 spin_lock_irqsave(&phba->hbalock, flags);
1719 for (i = 0; i < hbq_count; ++i) {
1720 list_for_each_entry_safe(dmabuf, next_dmabuf,
1721 &phba->hbqs[i].hbq_buffer_list, list) {
1722 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1723 list_del(&hbq_buf->dbuf.list);
1724 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1725 }
1726 phba->hbqs[i].buffer_count = 0;
1727 }
 1728 	/* Return all HBQ buffers that are in flight */
1729 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1730 list) {
1731 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1732 list_del(&hbq_buf->dbuf.list);
1733 if (hbq_buf->tag == -1) {
1734 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1735 (phba, hbq_buf);
1736 } else {
1737 hbqno = hbq_buf->tag >> 16;
1738 if (hbqno >= LPFC_MAX_HBQS)
1739 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1740 (phba, hbq_buf);
1741 else
1742 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1743 hbq_buf);
1744 }
1745 }
1746
1747 /* Mark the HBQs not in use */
1748 phba->hbq_in_use = 0;
1749 spin_unlock_irqrestore(&phba->hbalock, flags);
1750 }
1751
1752 /**
1753 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1754 * @phba: Pointer to HBA context object.
1755 * @hbqno: HBQ number.
1756 * @hbq_buf: Pointer to HBQ buffer.
1757 *
1758 * This function is called with the hbalock held to post a
1759 * hbq buffer to the firmware. If the function finds an empty
1760 * slot in the HBQ, it will post the buffer. The function will return
 1761  * zero if it successfully posts the buffer, otherwise it will
 1762  * return an error.
1763 **/
1764 static int
 1765 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1766 struct hbq_dmabuf *hbq_buf)
1767 {
1768 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1769 }
1770
1771 /**
1772 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1773 * @phba: Pointer to HBA context object.
1774 * @hbqno: HBQ number.
1775 * @hbq_buf: Pointer to HBQ buffer.
1776 *
1777 * This function is called with the hbalock held to post a hbq buffer to the
1778 * firmware. If the function finds an empty slot in the HBQ, it will post the
1779 * buffer and place it on the hbq_buffer_list. The function will return zero if
 1780  * it successfully posts the buffer, otherwise it will return an error.
1781 **/
1782 static int
 1783 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1784 struct hbq_dmabuf *hbq_buf)
1785 {
1786 struct lpfc_hbq_entry *hbqe;
1787 dma_addr_t physaddr = hbq_buf->dbuf.phys;
1788
1789 /* Get next HBQ entry slot to use */
1790 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1791 if (hbqe) {
1792 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1793
1794 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1795 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
1796 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
1797 hbqe->bde.tus.f.bdeFlags = 0;
1798 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1799 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1800 /* Sync SLIM */
1801 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1802 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1803 /* flush */
1804 readl(phba->hbq_put + hbqno);
1805 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1806 return 0;
1807 } else
1808 return -ENOMEM;
1809 }
1810
1811 /**
1812 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1813 * @phba: Pointer to HBA context object.
1814 * @hbqno: HBQ number.
1815 * @hbq_buf: Pointer to HBQ buffer.
1816 *
1817 * This function is called with the hbalock held to post an RQE to the SLI4
1818 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1819 * the hbq_buffer_list and return zero, otherwise it will return an error.
1820 **/
1821 static int
 1822 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1823 struct hbq_dmabuf *hbq_buf)
1824 {
1825 int rc;
1826 struct lpfc_rqe hrqe;
1827 struct lpfc_rqe drqe;
1828
1829 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1830 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1831 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1832 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1833 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1834 &hrqe, &drqe);
1835 if (rc < 0)
1836 return rc;
1837 hbq_buf->tag = rc;
1838 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1839 return 0;
1840 }
1841
1842 /* HBQ for ELS and CT traffic. */
1843 static struct lpfc_hbq_init lpfc_els_hbq = {
1844 .rn = 1,
1845 .entry_count = 256,
1846 .mask_count = 0,
1847 .profile = 0,
1848 .ring_mask = (1 << LPFC_ELS_RING),
1849 .buffer_count = 0,
1850 .init_count = 40,
1851 .add_count = 40,
1852 };
1853
1854 /* HBQ for the extra ring if needed */
1855 static struct lpfc_hbq_init lpfc_extra_hbq = {
1856 .rn = 1,
1857 .entry_count = 200,
1858 .mask_count = 0,
1859 .profile = 0,
1860 .ring_mask = (1 << LPFC_EXTRA_RING),
1861 .buffer_count = 0,
1862 .init_count = 0,
1863 .add_count = 5,
1864 };
1865
1866 /* Array of HBQs */
1867 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1868 &lpfc_els_hbq,
1869 &lpfc_extra_hbq,
1870 };
1871
1872 /**
1873 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1874 * @phba: Pointer to HBA context object.
1875 * @hbqno: HBQ number.
1876 * @count: Number of HBQ buffers to be posted.
1877 *
1878 * This function is called with no lock held to post more hbq buffers to the
1879 * given HBQ. The function returns the number of HBQ buffers successfully
1880 * posted.
1881 **/
1882 static int
 1883 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1884 {
1885 uint32_t i, posted = 0;
1886 unsigned long flags;
1887 struct hbq_dmabuf *hbq_buffer;
1888 LIST_HEAD(hbq_buf_list);
1889 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1890 return 0;
1891
1892 if ((phba->hbqs[hbqno].buffer_count + count) >
1893 lpfc_hbq_defs[hbqno]->entry_count)
1894 count = lpfc_hbq_defs[hbqno]->entry_count -
1895 phba->hbqs[hbqno].buffer_count;
1896 if (!count)
1897 return 0;
1898 /* Allocate HBQ entries */
1899 for (i = 0; i < count; i++) {
1900 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1901 if (!hbq_buffer)
1902 break;
1903 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1904 }
1905 /* Check whether HBQ is still in use */
1906 spin_lock_irqsave(&phba->hbalock, flags);
1907 if (!phba->hbq_in_use)
1908 goto err;
1909 while (!list_empty(&hbq_buf_list)) {
1910 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1911 dbuf.list);
1912 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1913 (hbqno << 16));
1914 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1915 phba->hbqs[hbqno].buffer_count++;
1916 posted++;
1917 } else
1918 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1919 }
1920 spin_unlock_irqrestore(&phba->hbalock, flags);
1921 return posted;
1922 err:
1923 spin_unlock_irqrestore(&phba->hbalock, flags);
1924 while (!list_empty(&hbq_buf_list)) {
1925 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1926 dbuf.list);
1927 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1928 }
1929 return 0;
1930 }
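
/*
 * Editor's illustration of the buffer tag layout set up above and decoded by
 * lpfc_sli_hbqbuf_find() below: the HBQ number is carried in the upper 16 bits
 * of the tag and a per-HBQ sequence number in the lower 16 bits. These helpers
 * are hypothetical and only make the open-coded shifts explicit.
 */
static inline uint32_t lpfc_example_make_hbq_tag(uint32_t hbqno, uint32_t seq)
{
	return (hbqno << 16) | (seq & 0xffff);
}

static inline uint32_t lpfc_example_hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}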
1931
1932 /**
1933 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1934 * @phba: Pointer to HBA context object.
1935 * @qno: HBQ number.
1936 *
1937 * This function posts more buffers to the HBQ. This function
1938 * is called with no lock held. The function returns the number of HBQ entries
1939 * successfully allocated.
1940 **/
1941 int
 1942 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1943 {
1944 if (phba->sli_rev == LPFC_SLI_REV4)
1945 return 0;
1946 else
1947 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1948 lpfc_hbq_defs[qno]->add_count);
1949 }
1950
1951 /**
1952 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
1953 * @phba: Pointer to HBA context object.
1954 * @qno: HBQ queue number.
1955 *
1956 * This function is called from SLI initialization code path with
1957 * no lock held to post initial HBQ buffers to firmware. The
1958 * function returns the number of HBQ entries successfully allocated.
1959 **/
1960 static int
 1961 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1962 {
1963 if (phba->sli_rev == LPFC_SLI_REV4)
1964 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1965 lpfc_hbq_defs[qno]->entry_count);
1966 else
1967 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1968 lpfc_hbq_defs[qno]->init_count);
1969 }
1970
1971 /**
1972 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 1973  * @rb_list: Pointer to the hbq buffer list to take the buffer from.
1975 *
1976 * This function removes the first hbq buffer on an hbq list and returns a
1977 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1978 **/
1979 static struct hbq_dmabuf *
 1980 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1981 {
1982 struct lpfc_dmabuf *d_buf;
1983
1984 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1985 if (!d_buf)
1986 return NULL;
1987 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1988 }
1989
1990 /**
1991 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
1992 * @phba: Pointer to HBA context object.
1993 * @tag: Tag of the hbq buffer.
1994 *
1995 * This function is called with hbalock held. This function searches
1996 * for the hbq buffer associated with the given tag in the hbq buffer
 1997  * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1998 * it returns NULL.
1999 **/
2000 static struct hbq_dmabuf *
 2001 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2002 {
2003 struct lpfc_dmabuf *d_buf;
2004 struct hbq_dmabuf *hbq_buf;
2005 uint32_t hbqno;
2006
2007 hbqno = tag >> 16;
2008 if (hbqno >= LPFC_MAX_HBQS)
2009 return NULL;
2010
2011 spin_lock_irq(&phba->hbalock);
2012 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2013 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2014 if (hbq_buf->tag == tag) {
2015 spin_unlock_irq(&phba->hbalock);
2016 return hbq_buf;
2017 }
2018 }
2019 spin_unlock_irq(&phba->hbalock);
2020 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2021 "1803 Bad hbq tag. Data: x%x x%x\n",
2022 tag, phba->hbqs[tag >> 16].buffer_count);
2023 return NULL;
2024 }
2025
2026 /**
2027 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2028 * @phba: Pointer to HBA context object.
2029 * @hbq_buffer: Pointer to HBQ buffer.
2030 *
 2031  * This function is called with the hbalock held. This function gives back
2032 * the hbq buffer to firmware. If the HBQ does not have space to
2033 * post the buffer, it will free the buffer.
2034 **/
2035 void
 2036 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2037 {
2038 uint32_t hbqno;
2039
2040 if (hbq_buffer) {
2041 hbqno = hbq_buffer->tag >> 16;
2042 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2043 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2044 }
2045 }
2046
2047 /**
2048 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2049 * @mbxCommand: mailbox command code.
2050 *
2051 * This function is called by the mailbox event handler function to verify
2052 * that the completed mailbox command is a legitimate mailbox command. If the
2053 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2054 * and the mailbox event handler will take the HBA offline.
2055 **/
2056 static int
 2057 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2058 {
2059 uint8_t ret;
2060
2061 switch (mbxCommand) {
2062 case MBX_LOAD_SM:
2063 case MBX_READ_NV:
2064 case MBX_WRITE_NV:
2065 case MBX_WRITE_VPARMS:
2066 case MBX_RUN_BIU_DIAG:
2067 case MBX_INIT_LINK:
2068 case MBX_DOWN_LINK:
2069 case MBX_CONFIG_LINK:
2070 case MBX_CONFIG_RING:
2071 case MBX_RESET_RING:
2072 case MBX_READ_CONFIG:
2073 case MBX_READ_RCONFIG:
2074 case MBX_READ_SPARM:
2075 case MBX_READ_STATUS:
2076 case MBX_READ_RPI:
2077 case MBX_READ_XRI:
2078 case MBX_READ_REV:
2079 case MBX_READ_LNK_STAT:
2080 case MBX_REG_LOGIN:
2081 case MBX_UNREG_LOGIN:
2082 case MBX_CLEAR_LA:
2083 case MBX_DUMP_MEMORY:
2084 case MBX_DUMP_CONTEXT:
2085 case MBX_RUN_DIAGS:
2086 case MBX_RESTART:
2087 case MBX_UPDATE_CFG:
2088 case MBX_DOWN_LOAD:
2089 case MBX_DEL_LD_ENTRY:
2090 case MBX_RUN_PROGRAM:
2091 case MBX_SET_MASK:
2092 case MBX_SET_VARIABLE:
2093 case MBX_UNREG_D_ID:
2094 case MBX_KILL_BOARD:
2095 case MBX_CONFIG_FARP:
2096 case MBX_BEACON:
2097 case MBX_LOAD_AREA:
2098 case MBX_RUN_BIU_DIAG64:
2099 case MBX_CONFIG_PORT:
2100 case MBX_READ_SPARM64:
2101 case MBX_READ_RPI64:
2102 case MBX_REG_LOGIN64:
2103 case MBX_READ_TOPOLOGY:
2104 case MBX_WRITE_WWN:
2105 case MBX_SET_DEBUG:
2106 case MBX_LOAD_EXP_ROM:
2107 case MBX_ASYNCEVT_ENABLE:
2108 case MBX_REG_VPI:
2109 case MBX_UNREG_VPI:
2110 case MBX_HEARTBEAT:
2111 case MBX_PORT_CAPABILITIES:
2112 case MBX_PORT_IOV_CONTROL:
2113 case MBX_SLI4_CONFIG:
2114 case MBX_SLI4_REQ_FTRS:
2115 case MBX_REG_FCFI:
2116 case MBX_UNREG_FCFI:
2117 case MBX_REG_VFI:
2118 case MBX_UNREG_VFI:
2119 case MBX_INIT_VPI:
2120 case MBX_INIT_VFI:
2121 case MBX_RESUME_RPI:
2122 case MBX_READ_EVENT_LOG_STATUS:
2123 case MBX_READ_EVENT_LOG:
2124 case MBX_SECURITY_MGMT:
2125 case MBX_AUTH_PORT:
2126 case MBX_ACCESS_VDATA:
2127 ret = mbxCommand;
2128 break;
2129 default:
2130 ret = MBX_SHUTDOWN;
2131 break;
2132 }
2133 return ret;
2134 }
2135
2136 /**
2137 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2138 * @phba: Pointer to HBA context object.
2139 * @pmboxq: Pointer to mailbox command.
2140 *
2141 * This is completion handler function for mailbox commands issued from
2142 * lpfc_sli_issue_mbox_wait function. This function is called by the
2143 * mailbox event handler function with no lock held. This function
 2144  * will wake up the thread waiting on the wait queue pointed to by context1
2145 * of the mailbox.
2146 **/
2147 void
 2148 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2149 {
2150 wait_queue_head_t *pdone_q;
2151 unsigned long drvr_flag;
2152
2153 /*
2154 * If pdone_q is empty, the driver thread gave up waiting and
2155 * continued running.
2156 */
2157 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2158 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2159 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2160 if (pdone_q)
2161 wake_up_interruptible(pdone_q);
2162 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2163 return;
2164 }
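
/*
 * Editor's sketch of the waiting side that pairs with the completion handler
 * above. lpfc_sli_issue_mbox_wait() follows roughly this shape: park a wait
 * queue head in context1, issue the mailbox without waiting, then sleep until
 * the handler sets LPFC_MBX_WAKE. This condensed, hypothetical helper omits
 * the locking and cleanup done by the real routine and exists only to
 * illustrate the hand-off.
 */
static int lpfc_example_issue_mbox_and_wait(struct lpfc_hba *phba,
					    LPFC_MBOXQ_t *pmboxq,
					    uint32_t timeout_s)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int rc;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	pmboxq->context1 = &done_q;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
		return rc;

	/* Sleep until the completion handler wakes us up or we time out. */
	wait_event_interruptible_timeout(done_q,
					 pmboxq->mbox_flag & LPFC_MBX_WAKE,
					 msecs_to_jiffies(timeout_s * 1000));

	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? MBX_SUCCESS : MBX_TIMEOUT;
}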
2165
2166
2167 /**
2168 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2169 * @phba: Pointer to HBA context object.
2170 * @pmb: Pointer to mailbox object.
2171 *
2172 * This function is the default mailbox completion handler. It
2173 * frees the memory resources associated with the completed mailbox
2174 * command. If the completed command is a REG_LOGIN mailbox command,
 2175  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2176 **/
2177 void
 2178 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2179 {
2180 struct lpfc_vport *vport = pmb->vport;
2181 struct lpfc_dmabuf *mp;
2182 struct lpfc_nodelist *ndlp;
2183 struct Scsi_Host *shost;
2184 uint16_t rpi, vpi;
2185 int rc;
2186
2187 mp = (struct lpfc_dmabuf *) (pmb->context1);
2188
2189 if (mp) {
2190 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2191 kfree(mp);
2192 }
2193
2194 /*
 2195 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
 2196 	 * is in re-discovery, the driver needs to clean up the RPI.
2197 */
2198 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2199 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2200 !pmb->u.mb.mbxStatus) {
2201 rpi = pmb->u.mb.un.varWords[0];
2202 vpi = pmb->u.mb.un.varRegLogin.vpi;
2203 lpfc_unreg_login(phba, vpi, rpi, pmb);
2204 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2205 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2206 if (rc != MBX_NOT_FINISHED)
2207 return;
2208 }
2209
2210 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2211 !(phba->pport->load_flag & FC_UNLOADING) &&
2212 !pmb->u.mb.mbxStatus) {
2213 shost = lpfc_shost_from_vport(vport);
2214 spin_lock_irq(shost->host_lock);
2215 vport->vpi_state |= LPFC_VPI_REGISTERED;
2216 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2217 spin_unlock_irq(shost->host_lock);
2218 }
2219
2220 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2221 ndlp = (struct lpfc_nodelist *)pmb->context2;
2222 lpfc_nlp_put(ndlp);
2223 pmb->context2 = NULL;
2224 }
2225
2226 /* Check security permission status on INIT_LINK mailbox command */
2227 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2228 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2229 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2230 "2860 SLI authentication is required "
2231 "for INIT_LINK but has not done yet\n");
2232
2233 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2234 lpfc_sli4_mbox_cmd_free(phba, pmb);
2235 else
2236 mempool_free(pmb, phba->mbox_mem_pool);
2237 }
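
/*
 * Editor's sketch of the typical fire-and-forget caller pattern that relies
 * on lpfc_sli_def_mbox_cmpl() above to reclaim resources: build a mailbox
 * command, point mbox_cmpl at the default handler, and issue it without
 * waiting. The helper below is hypothetical and abridged; it mirrors how the
 * driver issues UNREG_LOGIN in several places.
 */
static int lpfc_example_unreg_rpi_nowait(struct lpfc_hba *phba,
					 uint16_t vpi, uint32_t rpi)
{
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Build the UNREG_LOGIN command in the mailbox buffer. */
	lpfc_unreg_login(phba, vpi, rpi, mbox);
	/* The default handler frees the mailbox when the command completes. */
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}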
2238
2239 /**
2240 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2241 * @phba: Pointer to HBA context object.
2242 *
2243 * This function is called with no lock held. This function processes all
 2244  * the completed mailbox commands and gives them to the upper layers. The interrupt
2245 * service routine processes mailbox completion interrupt and adds completed
2246 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 2247  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
2248 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2249 * function returns the mailbox commands to the upper layer by calling the
2250 * completion handler function of each mailbox.
2251 **/
2252 int
 2253 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2254 {
2255 MAILBOX_t *pmbox;
2256 LPFC_MBOXQ_t *pmb;
2257 int rc;
2258 LIST_HEAD(cmplq);
2259
2260 phba->sli.slistat.mbox_event++;
2261
 2262 	/* Get all completed mailbox buffers into the cmplq */
2263 spin_lock_irq(&phba->hbalock);
2264 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2265 spin_unlock_irq(&phba->hbalock);
2266
2267 /* Get a Mailbox buffer to setup mailbox commands for callback */
2268 do {
2269 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2270 if (pmb == NULL)
2271 break;
2272
2273 pmbox = &pmb->u.mb;
2274
2275 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2276 if (pmb->vport) {
2277 lpfc_debugfs_disc_trc(pmb->vport,
2278 LPFC_DISC_TRC_MBOX_VPORT,
2279 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2280 (uint32_t)pmbox->mbxCommand,
2281 pmbox->un.varWords[0],
2282 pmbox->un.varWords[1]);
2283 }
2284 else {
2285 lpfc_debugfs_disc_trc(phba->pport,
2286 LPFC_DISC_TRC_MBOX,
2287 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2288 (uint32_t)pmbox->mbxCommand,
2289 pmbox->un.varWords[0],
2290 pmbox->un.varWords[1]);
2291 }
2292 }
2293
2294 /*
 2295 		 * It is a fatal error if an unknown mbox command completes.
2296 */
2297 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2298 MBX_SHUTDOWN) {
2299 /* Unknown mailbox command compl */
2300 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2301 "(%d):0323 Unknown Mailbox command "
2302 "x%x (x%x/x%x) Cmpl\n",
2303 pmb->vport ? pmb->vport->vpi : 0,
2304 pmbox->mbxCommand,
2305 lpfc_sli_config_mbox_subsys_get(phba,
2306 pmb),
2307 lpfc_sli_config_mbox_opcode_get(phba,
2308 pmb));
2309 phba->link_state = LPFC_HBA_ERROR;
2310 phba->work_hs = HS_FFER3;
2311 lpfc_handle_eratt(phba);
2312 continue;
2313 }
2314
2315 if (pmbox->mbxStatus) {
2316 phba->sli.slistat.mbox_stat_err++;
2317 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2318 /* Mbox cmd cmpl error - RETRYing */
2319 lpfc_printf_log(phba, KERN_INFO,
2320 LOG_MBOX | LOG_SLI,
2321 "(%d):0305 Mbox cmd cmpl "
2322 "error - RETRYing Data: x%x "
2323 "(x%x/x%x) x%x x%x x%x\n",
2324 pmb->vport ? pmb->vport->vpi : 0,
2325 pmbox->mbxCommand,
2326 lpfc_sli_config_mbox_subsys_get(phba,
2327 pmb),
2328 lpfc_sli_config_mbox_opcode_get(phba,
2329 pmb),
2330 pmbox->mbxStatus,
2331 pmbox->un.varWords[0],
2332 pmb->vport->port_state);
2333 pmbox->mbxStatus = 0;
2334 pmbox->mbxOwner = OWN_HOST;
2335 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2336 if (rc != MBX_NOT_FINISHED)
2337 continue;
2338 }
2339 }
2340
2341 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2342 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2343 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2344 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2345 "x%x x%x x%x\n",
2346 pmb->vport ? pmb->vport->vpi : 0,
2347 pmbox->mbxCommand,
2348 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2349 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2350 pmb->mbox_cmpl,
2351 *((uint32_t *) pmbox),
2352 pmbox->un.varWords[0],
2353 pmbox->un.varWords[1],
2354 pmbox->un.varWords[2],
2355 pmbox->un.varWords[3],
2356 pmbox->un.varWords[4],
2357 pmbox->un.varWords[5],
2358 pmbox->un.varWords[6],
2359 pmbox->un.varWords[7],
2360 pmbox->un.varWords[8],
2361 pmbox->un.varWords[9],
2362 pmbox->un.varWords[10]);
2363
2364 if (pmb->mbox_cmpl)
2365 pmb->mbox_cmpl(phba,pmb);
2366 } while (1);
2367 return 0;
2368 }
2369
2370 /**
2371 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2372 * @phba: Pointer to HBA context object.
2373 * @pring: Pointer to driver SLI ring object.
2374 * @tag: buffer tag.
2375 *
 2376  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 2377  * is set in the tag, the buffer was posted for a particular exchange and
 2378  * the function will return the buffer without replacing it.
2379 * If the buffer is for unsolicited ELS or CT traffic, this function
2380 * returns the buffer and also posts another buffer to the firmware.
2381 **/
2382 static struct lpfc_dmabuf *
 2383 lpfc_sli_get_buff(struct lpfc_hba *phba,
2384 struct lpfc_sli_ring *pring,
2385 uint32_t tag)
2386 {
2387 struct hbq_dmabuf *hbq_entry;
2388
2389 if (tag & QUE_BUFTAG_BIT)
2390 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2391 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2392 if (!hbq_entry)
2393 return NULL;
2394 return &hbq_entry->dbuf;
2395 }
2396
2397 /**
2398 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2399 * @phba: Pointer to HBA context object.
2400 * @pring: Pointer to driver SLI ring object.
2401 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2402 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2403 * @fch_type: the type for the first frame of the sequence.
2404 *
2405 * This function is called with no lock held. This function uses the r_ctl and
2406 * type of the received sequence to find the correct callback function to call
2407 * to process the sequence.
2408 **/
2409 static int
 2410 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2411 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2412 uint32_t fch_type)
2413 {
2414 int i;
2415
2416 /* unSolicited Responses */
2417 if (pring->prt[0].profile) {
2418 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2419 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2420 saveq);
2421 return 1;
2422 }
2423 /* We must search, based on rctl / type
2424 for the right routine */
2425 for (i = 0; i < pring->num_mask; i++) {
2426 if ((pring->prt[i].rctl == fch_r_ctl) &&
2427 (pring->prt[i].type == fch_type)) {
2428 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2429 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2430 (phba, pring, saveq);
2431 return 1;
2432 }
2433 }
2434 return 0;
2435 }
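
/*
 * Editor's sketch of how the rctl/type mask table searched above gets
 * populated. Ring setup code elsewhere in the driver (lpfc_sli_setup) fills
 * pring->prt[] roughly like this for the ELS ring; the entries are abridged
 * and the helper name is hypothetical.
 */
static void lpfc_example_setup_els_masks(struct lpfc_sli_ring *pring)
{
	/* Route unsolicited ELS requests to the ELS handler. */
	pring->prt[0].profile = 0;
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
	pring->num_mask = 1;
}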
2436
2437 /**
2438 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2439 * @phba: Pointer to HBA context object.
2440 * @pring: Pointer to driver SLI ring object.
2441 * @saveq: Pointer to the unsolicited iocb.
2442 *
2443 * This function is called with no lock held by the ring event handler
2444 * when there is an unsolicited iocb posted to the response ring by the
2445 * firmware. This function gets the buffer associated with the iocbs
2446 * and calls the event handler for the ring. This function handles both
2447 * qring buffers and hbq buffers.
 2448  * When the function returns 1, the caller can free the iocb object; otherwise
2449 * upper layer functions will free the iocb objects.
2450 **/
2451 static int
 2452 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2453 struct lpfc_iocbq *saveq)
2454 {
2455 IOCB_t * irsp;
2456 WORD5 * w5p;
2457 uint32_t Rctl, Type;
2458 uint32_t match;
2459 struct lpfc_iocbq *iocbq;
2460 struct lpfc_dmabuf *dmzbuf;
2461
2462 match = 0;
2463 irsp = &(saveq->iocb);
2464
2465 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2466 if (pring->lpfc_sli_rcv_async_status)
2467 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2468 else
2469 lpfc_printf_log(phba,
2470 KERN_WARNING,
2471 LOG_SLI,
2472 "0316 Ring %d handler: unexpected "
2473 "ASYNC_STATUS iocb received evt_code "
2474 "0x%x\n",
2475 pring->ringno,
2476 irsp->un.asyncstat.evt_code);
2477 return 1;
2478 }
2479
2480 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2481 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2482 if (irsp->ulpBdeCount > 0) {
2483 dmzbuf = lpfc_sli_get_buff(phba, pring,
2484 irsp->un.ulpWord[3]);
2485 lpfc_in_buf_free(phba, dmzbuf);
2486 }
2487
2488 if (irsp->ulpBdeCount > 1) {
2489 dmzbuf = lpfc_sli_get_buff(phba, pring,
2490 irsp->unsli3.sli3Words[3]);
2491 lpfc_in_buf_free(phba, dmzbuf);
2492 }
2493
2494 if (irsp->ulpBdeCount > 2) {
2495 dmzbuf = lpfc_sli_get_buff(phba, pring,
2496 irsp->unsli3.sli3Words[7]);
2497 lpfc_in_buf_free(phba, dmzbuf);
2498 }
2499
2500 return 1;
2501 }
2502
2503 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2504 if (irsp->ulpBdeCount != 0) {
2505 saveq->context2 = lpfc_sli_get_buff(phba, pring,
2506 irsp->un.ulpWord[3]);
2507 if (!saveq->context2)
2508 lpfc_printf_log(phba,
2509 KERN_ERR,
2510 LOG_SLI,
2511 "0341 Ring %d Cannot find buffer for "
2512 "an unsolicited iocb. tag 0x%x\n",
2513 pring->ringno,
2514 irsp->un.ulpWord[3]);
2515 }
2516 if (irsp->ulpBdeCount == 2) {
2517 saveq->context3 = lpfc_sli_get_buff(phba, pring,
2518 irsp->unsli3.sli3Words[7]);
2519 if (!saveq->context3)
2520 lpfc_printf_log(phba,
2521 KERN_ERR,
2522 LOG_SLI,
2523 "0342 Ring %d Cannot find buffer for an"
2524 " unsolicited iocb. tag 0x%x\n",
2525 pring->ringno,
2526 irsp->unsli3.sli3Words[7]);
2527 }
2528 list_for_each_entry(iocbq, &saveq->list, list) {
2529 irsp = &(iocbq->iocb);
2530 if (irsp->ulpBdeCount != 0) {
2531 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2532 irsp->un.ulpWord[3]);
2533 if (!iocbq->context2)
2534 lpfc_printf_log(phba,
2535 KERN_ERR,
2536 LOG_SLI,
2537 "0343 Ring %d Cannot find "
2538 "buffer for an unsolicited iocb"
2539 ". tag 0x%x\n", pring->ringno,
2540 irsp->un.ulpWord[3]);
2541 }
2542 if (irsp->ulpBdeCount == 2) {
2543 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2544 irsp->unsli3.sli3Words[7]);
2545 if (!iocbq->context3)
2546 lpfc_printf_log(phba,
2547 KERN_ERR,
2548 LOG_SLI,
2549 "0344 Ring %d Cannot find "
2550 "buffer for an unsolicited "
2551 "iocb. tag 0x%x\n",
2552 pring->ringno,
2553 irsp->unsli3.sli3Words[7]);
2554 }
2555 }
2556 }
2557 if (irsp->ulpBdeCount != 0 &&
2558 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2559 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2560 int found = 0;
2561
2562 /* search continue save q for same XRI */
2563 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2564 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2565 saveq->iocb.unsli3.rcvsli3.ox_id) {
2566 list_add_tail(&saveq->list, &iocbq->list);
2567 found = 1;
2568 break;
2569 }
2570 }
2571 if (!found)
2572 list_add_tail(&saveq->clist,
2573 &pring->iocb_continue_saveq);
2574 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2575 list_del_init(&iocbq->clist);
2576 saveq = iocbq;
2577 irsp = &(saveq->iocb);
2578 } else
2579 return 0;
2580 }
2581 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2582 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2583 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2584 Rctl = FC_RCTL_ELS_REQ;
2585 Type = FC_TYPE_ELS;
2586 } else {
2587 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2588 Rctl = w5p->hcsw.Rctl;
2589 Type = w5p->hcsw.Type;
2590
2591 /* Firmware Workaround */
2592 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2593 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2594 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2595 Rctl = FC_RCTL_ELS_REQ;
2596 Type = FC_TYPE_ELS;
2597 w5p->hcsw.Rctl = Rctl;
2598 w5p->hcsw.Type = Type;
2599 }
2600 }
2601
2602 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2603 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2604 "0313 Ring %d handler: unexpected Rctl x%x "
2605 "Type x%x received\n",
2606 pring->ringno, Rctl, Type);
2607
2608 return 1;
2609 }
2610
2611 /**
2612 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2613 * @phba: Pointer to HBA context object.
2614 * @pring: Pointer to driver SLI ring object.
2615 * @prspiocb: Pointer to response iocb object.
2616 *
2617 * This function looks up the iocb_lookup table to get the command iocb
2618 * corresponding to the given response iocb using the iotag of the
2619 * response iocb. This function is called with the hbalock held.
2620 * This function returns the command iocb object if it finds the command
2621 * iocb else returns NULL.
2622 **/
2623 static struct lpfc_iocbq *
 2624 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2625 struct lpfc_sli_ring *pring,
2626 struct lpfc_iocbq *prspiocb)
2627 {
2628 struct lpfc_iocbq *cmd_iocb = NULL;
2629 uint16_t iotag;
2630
2631 iotag = prspiocb->iocb.ulpIoTag;
2632
2633 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2634 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2635 list_del_init(&cmd_iocb->list);
2636 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2637 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2638 }
2639 return cmd_iocb;
2640 }
2641
2642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2643 "0317 iotag x%x is out off "
2644 "range: max iotag x%x wd0 x%x\n",
2645 iotag, phba->sli.last_iotag,
2646 *(((uint32_t *) &prspiocb->iocb) + 7));
2647 return NULL;
2648 }
2649
2650 /**
2651 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2652 * @phba: Pointer to HBA context object.
2653 * @pring: Pointer to driver SLI ring object.
2654 * @iotag: IOCB tag.
2655 *
2656 * This function looks up the iocb_lookup table to get the command iocb
2657 * corresponding to the given iotag. This function is called with the
2658 * hbalock held.
2659 * This function returns the command iocb object if it finds the command
2660 * iocb else returns NULL.
2661 **/
2662 static struct lpfc_iocbq *
 2663 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2664 struct lpfc_sli_ring *pring, uint16_t iotag)
2665 {
2666 struct lpfc_iocbq *cmd_iocb;
2667
2668 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2669 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2670 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2671 /* remove from txcmpl queue list */
2672 list_del_init(&cmd_iocb->list);
2673 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2674 return cmd_iocb;
2675 }
2676 }
2677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2678 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2679 iotag, phba->sli.last_iotag);
2680 return NULL;
2681 }
2682
2683 /**
2684 * lpfc_sli_process_sol_iocb - process solicited iocb completion
2685 * @phba: Pointer to HBA context object.
2686 * @pring: Pointer to driver SLI ring object.
2687 * @saveq: Pointer to the response iocb to be processed.
2688 *
2689 * This function is called by the ring event handler for non-fcp
2690 * rings when there is a new response iocb in the response ring.
2691 * The caller is not required to hold any locks. This function
2692 * gets the command iocb associated with the response iocb and
2693 * calls the completion handler for the command iocb. If there
2694 * is no completion handler, the function will free the resources
2695 * associated with command iocb. If the response iocb is for
2696 * an already aborted command iocb, the status of the completion
2697 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2698 * This function always returns 1.
2699 **/
2700 static int
 2701 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2702 struct lpfc_iocbq *saveq)
2703 {
2704 struct lpfc_iocbq *cmdiocbp;
2705 int rc = 1;
2706 unsigned long iflag;
2707
2708 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
2709 spin_lock_irqsave(&phba->hbalock, iflag);
2710 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2711 spin_unlock_irqrestore(&phba->hbalock, iflag);
2712
2713 if (cmdiocbp) {
2714 if (cmdiocbp->iocb_cmpl) {
2715 /*
2716 * If an ELS command failed send an event to mgmt
2717 * application.
2718 */
2719 if (saveq->iocb.ulpStatus &&
2720 (pring->ringno == LPFC_ELS_RING) &&
2721 (cmdiocbp->iocb.ulpCommand ==
2722 CMD_ELS_REQUEST64_CR))
2723 lpfc_send_els_failure_event(phba,
2724 cmdiocbp, saveq);
2725
2726 /*
2727 * Post all ELS completions to the worker thread.
 2728 			 * All others are passed to the completion callback.
2729 */
2730 if (pring->ringno == LPFC_ELS_RING) {
2731 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2732 (cmdiocbp->iocb_flag &
2733 LPFC_DRIVER_ABORTED)) {
2734 spin_lock_irqsave(&phba->hbalock,
2735 iflag);
2736 cmdiocbp->iocb_flag &=
2737 ~LPFC_DRIVER_ABORTED;
2738 spin_unlock_irqrestore(&phba->hbalock,
2739 iflag);
2740 saveq->iocb.ulpStatus =
2741 IOSTAT_LOCAL_REJECT;
2742 saveq->iocb.un.ulpWord[4] =
2743 IOERR_SLI_ABORTED;
2744
2745 /* Firmware could still be in progress
2746 * of DMAing payload, so don't free data
2747 * buffer till after a hbeat.
2748 */
2749 spin_lock_irqsave(&phba->hbalock,
2750 iflag);
2751 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2752 spin_unlock_irqrestore(&phba->hbalock,
2753 iflag);
2754 }
2755 if (phba->sli_rev == LPFC_SLI_REV4) {
2756 if (saveq->iocb_flag &
2757 LPFC_EXCHANGE_BUSY) {
2758 /* Set cmdiocb flag for the
2759 * exchange busy so sgl (xri)
2760 * will not be released until
2761 * the abort xri is received
2762 * from hba.
2763 */
2764 spin_lock_irqsave(
2765 &phba->hbalock, iflag);
2766 cmdiocbp->iocb_flag |=
2767 LPFC_EXCHANGE_BUSY;
2768 spin_unlock_irqrestore(
2769 &phba->hbalock, iflag);
2770 }
2771 if (cmdiocbp->iocb_flag &
2772 LPFC_DRIVER_ABORTED) {
2773 /*
2774 * Clear LPFC_DRIVER_ABORTED
2775 * bit in case it was driver
2776 * initiated abort.
2777 */
2778 spin_lock_irqsave(
2779 &phba->hbalock, iflag);
2780 cmdiocbp->iocb_flag &=
2781 ~LPFC_DRIVER_ABORTED;
2782 spin_unlock_irqrestore(
2783 &phba->hbalock, iflag);
2784 cmdiocbp->iocb.ulpStatus =
2785 IOSTAT_LOCAL_REJECT;
2786 cmdiocbp->iocb.un.ulpWord[4] =
2787 IOERR_ABORT_REQUESTED;
2788 /*
2789 * For SLI4, irsiocb contains
2790 * NO_XRI in sli_xritag, it
2791 * shall not affect releasing
2792 * sgl (xri) process.
2793 */
2794 saveq->iocb.ulpStatus =
2795 IOSTAT_LOCAL_REJECT;
2796 saveq->iocb.un.ulpWord[4] =
2797 IOERR_SLI_ABORTED;
2798 spin_lock_irqsave(
2799 &phba->hbalock, iflag);
2800 saveq->iocb_flag |=
2801 LPFC_DELAY_MEM_FREE;
2802 spin_unlock_irqrestore(
2803 &phba->hbalock, iflag);
2804 }
2805 }
2806 }
2807 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2808 } else
2809 lpfc_sli_release_iocbq(phba, cmdiocbp);
2810 } else {
2811 /*
2812 * Unknown initiating command based on the response iotag.
2813 * This could be the case on the ELS ring because of
2814 * lpfc_els_abort().
2815 */
2816 if (pring->ringno != LPFC_ELS_RING) {
2817 /*
2818 * Ring <ringno> handler: unexpected completion IoTag
2819 * <IoTag>
2820 */
2821 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2822 "0322 Ring %d handler: "
2823 "unexpected completion IoTag x%x "
2824 "Data: x%x x%x x%x x%x\n",
2825 pring->ringno,
2826 saveq->iocb.ulpIoTag,
2827 saveq->iocb.ulpStatus,
2828 saveq->iocb.un.ulpWord[4],
2829 saveq->iocb.ulpCommand,
2830 saveq->iocb.ulpContext);
2831 }
2832 }
2833
2834 return rc;
2835 }
2836
2837 /**
2838 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2839 * @phba: Pointer to HBA context object.
2840 * @pring: Pointer to driver SLI ring object.
2841 *
2842 * This function is called from the iocb ring event handlers when
2843 * put pointer is ahead of the get pointer for a ring. This function signal
2844 * an error attention condition to the worker thread and the worker
2845 * thread will transition the HBA to offline state.
2846 **/
2847 static void
 2848 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2849 {
2850 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2851 /*
2852 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2853 * rsp ring <portRspMax>
2854 */
2855 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2856 "0312 Ring %d handler: portRspPut %d "
2857 "is bigger than rsp ring %d\n",
2858 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2859 pring->sli.sli3.numRiocb);
2860
2861 phba->link_state = LPFC_HBA_ERROR;
2862
2863 /*
2864 * All error attention handlers are posted to
2865 * worker thread
2866 */
2867 phba->work_ha |= HA_ERATT;
2868 phba->work_hs = HS_FFER3;
2869
2870 lpfc_worker_wake_up(phba);
2871
2872 return;
2873 }
2874
2875 /**
2876 * lpfc_poll_eratt - Error attention polling timer timeout handler
2877 * @ptr: Pointer to address of HBA context object.
2878 *
2879 * This function is invoked by the Error Attention polling timer when the
2880 * timer times out. It will check the SLI Error Attention register for
2881 * possible attention events. If so, it will post an Error Attention event
2882 * and wake up worker thread to process it. Otherwise, it will set up the
2883 * Error Attention polling timer for the next poll.
2884 **/
 2885 void lpfc_poll_eratt(unsigned long ptr)
2886 {
2887 struct lpfc_hba *phba;
2888 uint32_t eratt = 0, rem;
2889 uint64_t sli_intr, cnt;
2890
2891 phba = (struct lpfc_hba *)ptr;
2892
2893 /* Here we will also keep track of interrupts per sec of the hba */
2894 sli_intr = phba->sli.slistat.sli_intr;
2895
2896 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2897 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2898 sli_intr);
2899 else
2900 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2901
 2902 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
2903 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
2904 phba->sli.slistat.sli_ips = cnt;
2905
2906 phba->sli.slistat.sli_prev_intr = sli_intr;
2907
2908 /* Check chip HA register for error event */
2909 eratt = lpfc_sli_check_eratt(phba);
2910
2911 if (eratt)
2912 /* Tell the worker thread there is work to do */
2913 lpfc_worker_wake_up(phba);
2914 else
2915 /* Restart the timer for next eratt poll */
2916 mod_timer(&phba->eratt_poll,
2917 jiffies +
2918 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
2919 return;
2920 }
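
/*
 * Editor's note on the do_div() idiom used above (illustrative only):
 * do_div() takes a 64-bit dividend lvalue and a 32-bit divisor, replaces the
 * dividend with the quotient, and returns the remainder, which lets 32-bit
 * kernels avoid a native 64-bit division. The helper below is hypothetical
 * and simply restates the interrupts-per-second math.
 */
static inline uint64_t lpfc_example_intr_per_sec(uint64_t intr_delta,
						 uint32_t interval_s)
{
	/* intr_delta becomes the quotient; the remainder is discarded here. */
	do_div(intr_delta, interval_s);
	return intr_delta;
}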
2921
2922
2923 /**
2924 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
2925 * @phba: Pointer to HBA context object.
2926 * @pring: Pointer to driver SLI ring object.
2927 * @mask: Host attention register mask for this ring.
2928 *
2929 * This function is called from the interrupt context when there is a ring
2930 * event for the fcp ring. The caller does not hold any lock.
2931 * The function processes each response iocb in the response ring until it
2932 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
2933 * LE bit set. The function will call the completion handler of the command iocb
2934 * if the response iocb indicates a completion for a command iocb or it is
2935 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
2936 * function if this is an unsolicited iocb.
2937 * This routine presumes LPFC_FCP_RING handling and doesn't bother
2938 * to check it explicitly.
2939 */
2940 int
 2941 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2942 struct lpfc_sli_ring *pring, uint32_t mask)
2943 {
2944 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2945 IOCB_t *irsp = NULL;
2946 IOCB_t *entry = NULL;
2947 struct lpfc_iocbq *cmdiocbq = NULL;
2948 struct lpfc_iocbq rspiocbq;
2949 uint32_t status;
2950 uint32_t portRspPut, portRspMax;
2951 int rc = 1;
2952 lpfc_iocb_type type;
2953 unsigned long iflag;
2954 uint32_t rsp_cmpl = 0;
2955
2956 spin_lock_irqsave(&phba->hbalock, iflag);
2957 pring->stats.iocb_event++;
2958
2959 /*
2960 * The next available response entry should never exceed the maximum
2961 * entries. If it does, treat it as an adapter hardware error.
2962 */
2963 portRspMax = pring->sli.sli3.numRiocb;
2964 portRspPut = le32_to_cpu(pgp->rspPutInx);
2965 if (unlikely(portRspPut >= portRspMax)) {
2966 lpfc_sli_rsp_pointers_error(phba, pring);
2967 spin_unlock_irqrestore(&phba->hbalock, iflag);
2968 return 1;
2969 }
2970 if (phba->fcp_ring_in_use) {
2971 spin_unlock_irqrestore(&phba->hbalock, iflag);
2972 return 1;
2973 } else
2974 phba->fcp_ring_in_use = 1;
2975
2976 rmb();
2977 while (pring->sli.sli3.rspidx != portRspPut) {
2978 /*
2979 * Fetch an entry off the ring and copy it into a local data
2980 * structure. The copy involves a byte-swap since the
2981 * network byte order and pci byte orders are different.
2982 */
2983 entry = lpfc_resp_iocb(phba, pring);
2984 phba->last_completion_time = jiffies;
2985
2986 if (++pring->sli.sli3.rspidx >= portRspMax)
2987 pring->sli.sli3.rspidx = 0;
2988
2989 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2990 (uint32_t *) &rspiocbq.iocb,
2991 phba->iocb_rsp_size);
2992 INIT_LIST_HEAD(&(rspiocbq.list));
2993 irsp = &rspiocbq.iocb;
2994
2995 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2996 pring->stats.iocb_rsp++;
2997 rsp_cmpl++;
2998
2999 if (unlikely(irsp->ulpStatus)) {
3000 /*
 3001 			 * If resource errors are reported from the HBA, reduce
 3002 			 * the queue depths of the SCSI devices.
3003 */
3004 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3005 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3006 IOERR_NO_RESOURCES)) {
3007 spin_unlock_irqrestore(&phba->hbalock, iflag);
3008 phba->lpfc_rampdown_queue_depth(phba);
3009 spin_lock_irqsave(&phba->hbalock, iflag);
3010 }
3011
3012 /* Rsp ring <ringno> error: IOCB */
3013 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3014 "0336 Rsp Ring %d error: IOCB Data: "
3015 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3016 pring->ringno,
3017 irsp->un.ulpWord[0],
3018 irsp->un.ulpWord[1],
3019 irsp->un.ulpWord[2],
3020 irsp->un.ulpWord[3],
3021 irsp->un.ulpWord[4],
3022 irsp->un.ulpWord[5],
3023 *(uint32_t *)&irsp->un1,
3024 *((uint32_t *)&irsp->un1 + 1));
3025 }
3026
3027 switch (type) {
3028 case LPFC_ABORT_IOCB:
3029 case LPFC_SOL_IOCB:
3030 /*
3031 * Idle exchange closed via ABTS from port. No iocb
3032 * resources need to be recovered.
3033 */
3034 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3035 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3036 "0333 IOCB cmd 0x%x"
3037 " processed. Skipping"
3038 " completion\n",
3039 irsp->ulpCommand);
3040 break;
3041 }
3042
3043 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3044 &rspiocbq);
3045 if (unlikely(!cmdiocbq))
3046 break;
3047 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3048 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3049 if (cmdiocbq->iocb_cmpl) {
3050 spin_unlock_irqrestore(&phba->hbalock, iflag);
3051 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3052 &rspiocbq);
3053 spin_lock_irqsave(&phba->hbalock, iflag);
3054 }
3055 break;
3056 case LPFC_UNSOL_IOCB:
3057 spin_unlock_irqrestore(&phba->hbalock, iflag);
3058 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3059 spin_lock_irqsave(&phba->hbalock, iflag);
3060 break;
3061 default:
3062 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3063 char adaptermsg[LPFC_MAX_ADPTMSG];
3064 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3065 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3066 MAX_MSG_DATA);
3067 dev_warn(&((phba->pcidev)->dev),
3068 "lpfc%d: %s\n",
3069 phba->brd_no, adaptermsg);
3070 } else {
3071 /* Unknown IOCB command */
3072 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3073 "0334 Unknown IOCB command "
3074 "Data: x%x, x%x x%x x%x x%x\n",
3075 type, irsp->ulpCommand,
3076 irsp->ulpStatus,
3077 irsp->ulpIoTag,
3078 irsp->ulpContext);
3079 }
3080 break;
3081 }
3082
3083 /*
3084 * The response IOCB has been processed. Update the ring
3085 * pointer in SLIM. If the port response put pointer has not
3086 * been updated, sync the pgp->rspPutInx and fetch the new port
3087 * response put pointer.
3088 */
3089 writel(pring->sli.sli3.rspidx,
3090 &phba->host_gp[pring->ringno].rspGetInx);
3091
3092 if (pring->sli.sli3.rspidx == portRspPut)
3093 portRspPut = le32_to_cpu(pgp->rspPutInx);
3094 }
3095
3096 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3097 pring->stats.iocb_rsp_full++;
3098 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3099 writel(status, phba->CAregaddr);
3100 readl(phba->CAregaddr);
3101 }
3102 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3103 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3104 pring->stats.iocb_cmd_empty++;
3105
3106 /* Force update of the local copy of cmdGetInx */
3107 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3108 lpfc_sli_resume_iocb(phba, pring);
3109
3110 if ((pring->lpfc_sli_cmd_available))
3111 (pring->lpfc_sli_cmd_available) (phba, pring);
3112
3113 }
3114
3115 phba->fcp_ring_in_use = 0;
3116 spin_unlock_irqrestore(&phba->hbalock, iflag);
3117 return rc;
3118 }
3119
3120 /**
3121 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3122 * @phba: Pointer to HBA context object.
3123 * @pring: Pointer to driver SLI ring object.
3124 * @rspiocbp: Pointer to driver response IOCB object.
3125 *
3126 * This function is called from the worker thread when there is a slow-path
3127 * response IOCB to process. This function chains all the response iocbs until
3128 * seeing the iocb with the LE bit set. The function will call
3129 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3130 * completion of a command iocb. The function will call the
3131 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3132 * The function frees the resources or calls the completion handler if this
3133 * iocb is an abort completion. The function returns NULL when the response
3134 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3135 * this function shall chain the iocb on to the iocb_continueq and return the
3136 * response iocb passed in.
3137 **/
3138 static struct lpfc_iocbq *
 3139 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3140 struct lpfc_iocbq *rspiocbp)
3141 {
3142 struct lpfc_iocbq *saveq;
3143 struct lpfc_iocbq *cmdiocbp;
3144 struct lpfc_iocbq *next_iocb;
3145 IOCB_t *irsp = NULL;
3146 uint32_t free_saveq;
3147 uint8_t iocb_cmd_type;
3148 lpfc_iocb_type type;
3149 unsigned long iflag;
3150 int rc;
3151
3152 spin_lock_irqsave(&phba->hbalock, iflag);
 3153 	/* First add the response iocb to the continueq list */
3154 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3155 pring->iocb_continueq_cnt++;
3156
3157 /* Now, determine whether the list is completed for processing */
3158 irsp = &rspiocbp->iocb;
3159 if (irsp->ulpLe) {
3160 /*
3161 * By default, the driver expects to free all resources
3162 * associated with this iocb completion.
3163 */
3164 free_saveq = 1;
3165 saveq = list_get_first(&pring->iocb_continueq,
3166 struct lpfc_iocbq, list);
3167 irsp = &(saveq->iocb);
3168 list_del_init(&pring->iocb_continueq);
3169 pring->iocb_continueq_cnt = 0;
3170
3171 pring->stats.iocb_rsp++;
3172
3173 /*
 3174 		 * If resource errors are reported from the HBA, reduce
 3175 		 * the queue depths of the SCSI devices.
3176 */
3177 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3178 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3179 IOERR_NO_RESOURCES)) {
3180 spin_unlock_irqrestore(&phba->hbalock, iflag);
3181 phba->lpfc_rampdown_queue_depth(phba);
3182 spin_lock_irqsave(&phba->hbalock, iflag);
3183 }
3184
3185 if (irsp->ulpStatus) {
3186 /* Rsp ring <ringno> error: IOCB */
3187 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3188 "0328 Rsp Ring %d error: "
3189 "IOCB Data: "
3190 "x%x x%x x%x x%x "
3191 "x%x x%x x%x x%x "
3192 "x%x x%x x%x x%x "
3193 "x%x x%x x%x x%x\n",
3194 pring->ringno,
3195 irsp->un.ulpWord[0],
3196 irsp->un.ulpWord[1],
3197 irsp->un.ulpWord[2],
3198 irsp->un.ulpWord[3],
3199 irsp->un.ulpWord[4],
3200 irsp->un.ulpWord[5],
3201 *(((uint32_t *) irsp) + 6),
3202 *(((uint32_t *) irsp) + 7),
3203 *(((uint32_t *) irsp) + 8),
3204 *(((uint32_t *) irsp) + 9),
3205 *(((uint32_t *) irsp) + 10),
3206 *(((uint32_t *) irsp) + 11),
3207 *(((uint32_t *) irsp) + 12),
3208 *(((uint32_t *) irsp) + 13),
3209 *(((uint32_t *) irsp) + 14),
3210 *(((uint32_t *) irsp) + 15));
3211 }
3212
3213 /*
3214 * Fetch the IOCB command type and call the correct completion
3215 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3216 * get freed back to the lpfc_iocb_list by the discovery
3217 * kernel thread.
3218 */
3219 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3220 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3221 switch (type) {
3222 case LPFC_SOL_IOCB:
3223 spin_unlock_irqrestore(&phba->hbalock, iflag);
3224 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3225 spin_lock_irqsave(&phba->hbalock, iflag);
3226 break;
3227
3228 case LPFC_UNSOL_IOCB:
3229 spin_unlock_irqrestore(&phba->hbalock, iflag);
3230 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3231 spin_lock_irqsave(&phba->hbalock, iflag);
3232 if (!rc)
3233 free_saveq = 0;
3234 break;
3235
3236 case LPFC_ABORT_IOCB:
3237 cmdiocbp = NULL;
3238 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3239 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3240 saveq);
3241 if (cmdiocbp) {
3242 /* Call the specified completion routine */
3243 if (cmdiocbp->iocb_cmpl) {
3244 spin_unlock_irqrestore(&phba->hbalock,
3245 iflag);
3246 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3247 saveq);
3248 spin_lock_irqsave(&phba->hbalock,
3249 iflag);
3250 } else
3251 __lpfc_sli_release_iocbq(phba,
3252 cmdiocbp);
3253 }
3254 break;
3255
3256 case LPFC_UNKNOWN_IOCB:
3257 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3258 char adaptermsg[LPFC_MAX_ADPTMSG];
3259 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3260 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3261 MAX_MSG_DATA);
3262 dev_warn(&((phba->pcidev)->dev),
3263 "lpfc%d: %s\n",
3264 phba->brd_no, adaptermsg);
3265 } else {
3266 /* Unknown IOCB command */
3267 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3268 "0335 Unknown IOCB "
3269 "command Data: x%x "
3270 "x%x x%x x%x\n",
3271 irsp->ulpCommand,
3272 irsp->ulpStatus,
3273 irsp->ulpIoTag,
3274 irsp->ulpContext);
3275 }
3276 break;
3277 }
3278
3279 if (free_saveq) {
3280 list_for_each_entry_safe(rspiocbp, next_iocb,
3281 &saveq->list, list) {
3282 list_del(&rspiocbp->list);
3283 __lpfc_sli_release_iocbq(phba, rspiocbp);
3284 }
3285 __lpfc_sli_release_iocbq(phba, saveq);
3286 }
3287 rspiocbp = NULL;
3288 }
3289 spin_unlock_irqrestore(&phba->hbalock, iflag);
3290 return rspiocbp;
3291 }
3292
3293 /**
3294 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3295 * @phba: Pointer to HBA context object.
3296 * @pring: Pointer to driver SLI ring object.
3297 * @mask: Host attention register mask for this ring.
3298 *
3299 * This routine wraps the actual slow_ring event process routine from the
3300 * API jump table function pointer from the lpfc_hba struct.
3301 **/
3302 void
3303 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3304 struct lpfc_sli_ring *pring, uint32_t mask)
3305 {
3306 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3307 }
3308
3309 /**
3310 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3311 * @phba: Pointer to HBA context object.
3312 * @pring: Pointer to driver SLI ring object.
3313 * @mask: Host attention register mask for this ring.
3314 *
3315 * This function is called from the worker thread when there is a ring event
3316 * for non-fcp rings. The caller does not hold any lock. The function removes
3317 * each response iocb from the response ring and calls the handle
3318 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3319 **/
3320 static void
3321 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3322 struct lpfc_sli_ring *pring, uint32_t mask)
3323 {
3324 struct lpfc_pgp *pgp;
3325 IOCB_t *entry;
3326 IOCB_t *irsp = NULL;
3327 struct lpfc_iocbq *rspiocbp = NULL;
3328 uint32_t portRspPut, portRspMax;
3329 unsigned long iflag;
3330 uint32_t status;
3331
3332 pgp = &phba->port_gp[pring->ringno];
3333 spin_lock_irqsave(&phba->hbalock, iflag);
3334 pring->stats.iocb_event++;
3335
3336 /*
3337 * The next available response entry should never exceed the maximum
3338 * entries. If it does, treat it as an adapter hardware error.
3339 */
3340 portRspMax = pring->sli.sli3.numRiocb;
3341 portRspPut = le32_to_cpu(pgp->rspPutInx);
3342 if (portRspPut >= portRspMax) {
3343 /*
3344 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3345 * rsp ring <portRspMax>
3346 */
3347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3348 "0303 Ring %d handler: portRspPut %d "
3349 "is bigger than rsp ring %d\n",
3350 pring->ringno, portRspPut, portRspMax);
3351
3352 phba->link_state = LPFC_HBA_ERROR;
3353 spin_unlock_irqrestore(&phba->hbalock, iflag);
3354
3355 phba->work_hs = HS_FFER3;
3356 lpfc_handle_eratt(phba);
3357
3358 return;
3359 }
3360
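	/*
	 * Read barrier: ensure the rspPutInx value read above is not
	 * reordered with the reads of the response ring entries below.
	 */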
3361 rmb();
3362 while (pring->sli.sli3.rspidx != portRspPut) {
3363 /*
3364 * Build a completion list and call the appropriate handler.
3365 * The process is to get the next available response iocb, get
3366 * a free iocb from the list, copy the response data into the
3367 * free iocb, insert it into the continuation list, and update the
3368 * next response index to slim. This process makes response
3369 * iocb's in the ring available to DMA as fast as possible but
3370 * pays a penalty for a copy operation. Since the iocb is
3371 * only 32 bytes, this penalty is considered small relative to
3372 * the PCI reads for register values and a slim write. When
3373 * the ulpLe field is set, the entire Command has been
3374 * received.
3375 */
3376 entry = lpfc_resp_iocb(phba, pring);
3377
3378 phba->last_completion_time = jiffies;
3379 rspiocbp = __lpfc_sli_get_iocbq(phba);
3380 if (rspiocbp == NULL) {
3381 printk(KERN_ERR "%s: out of buffers! Failing "
3382 "completion.\n", __func__);
3383 break;
3384 }
3385
3386 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3387 phba->iocb_rsp_size);
3388 irsp = &rspiocbp->iocb;
3389
3390 if (++pring->sli.sli3.rspidx >= portRspMax)
3391 pring->sli.sli3.rspidx = 0;
3392
3393 if (pring->ringno == LPFC_ELS_RING) {
3394 lpfc_debugfs_slow_ring_trc(phba,
3395 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3396 *(((uint32_t *) irsp) + 4),
3397 *(((uint32_t *) irsp) + 6),
3398 *(((uint32_t *) irsp) + 7));
3399 }
3400
3401 writel(pring->sli.sli3.rspidx,
3402 &phba->host_gp[pring->ringno].rspGetInx);
3403
3404 spin_unlock_irqrestore(&phba->hbalock, iflag);
3405 /* Handle the response IOCB */
3406 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3407 spin_lock_irqsave(&phba->hbalock, iflag);
3408
3409 /*
3410 * If the port response put pointer has not been updated, sync
3411 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3412 * response put pointer.
3413 */
3414 if (pring->sli.sli3.rspidx == portRspPut) {
3415 portRspPut = le32_to_cpu(pgp->rspPutInx);
3416 }
3417 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3418
3419 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3420 /* At least one response entry has been freed */
3421 pring->stats.iocb_rsp_full++;
3422 /* SET RxRE_RSP in Chip Att register */
3423 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3424 writel(status, phba->CAregaddr);
3425 readl(phba->CAregaddr); /* flush */
3426 }
3427 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3428 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3429 pring->stats.iocb_cmd_empty++;
3430
3431 /* Force update of the local copy of cmdGetInx */
3432 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3433 lpfc_sli_resume_iocb(phba, pring);
3434
3435 if ((pring->lpfc_sli_cmd_available))
3436 (pring->lpfc_sli_cmd_available) (phba, pring);
3437
3438 }
3439
3440 spin_unlock_irqrestore(&phba->hbalock, iflag);
3441 return;
3442 }
3443
3444 /**
3445 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3446 * @phba: Pointer to HBA context object.
3447 * @pring: Pointer to driver SLI ring object.
3448 * @mask: Host attention register mask for this ring.
3449 *
3450 * This function is called from the worker thread when there is a pending
3451 * ELS response iocb on the driver internal slow-path response iocb worker
3452 * queue. The caller does not hold any lock. The function removes each
3453 * response iocb from the response worker queue and calls the handle
3454 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3455 **/
3456 static void
3457 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3458 struct lpfc_sli_ring *pring, uint32_t mask)
3459 {
3460 struct lpfc_iocbq *irspiocbq;
3461 struct hbq_dmabuf *dmabuf;
3462 struct lpfc_cq_event *cq_event;
3463 unsigned long iflag;
3464
3465 spin_lock_irqsave(&phba->hbalock, iflag);
3466 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3467 spin_unlock_irqrestore(&phba->hbalock, iflag);
3468 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3469 /* Get the response iocb from the head of work queue */
3470 spin_lock_irqsave(&phba->hbalock, iflag);
3471 list_remove_head(&phba->sli4_hba.sp_queue_event,
3472 cq_event, struct lpfc_cq_event, list);
3473 spin_unlock_irqrestore(&phba->hbalock, iflag);
3474
3475 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3476 case CQE_CODE_COMPL_WQE:
3477 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3478 cq_event);
3479 /* Translate ELS WCQE to response IOCBQ */
3480 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3481 irspiocbq);
3482 if (irspiocbq)
3483 lpfc_sli_sp_handle_rspiocb(phba, pring,
3484 irspiocbq);
3485 break;
3486 case CQE_CODE_RECEIVE:
3487 case CQE_CODE_RECEIVE_V1:
3488 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3489 cq_event);
3490 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3491 break;
3492 default:
3493 break;
3494 }
3495 }
3496 }
3497
3498 /**
3499 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3500 * @phba: Pointer to HBA context object.
3501 * @pring: Pointer to driver SLI ring object.
3502 *
3503 * This function aborts all iocbs in the given ring and frees all the iocb
3504 * objects in txq. This function issues an abort iocb for all the iocb commands
3505 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3506 * the return of this function. The caller is not required to hold any locks.
3507 **/
3508 void
3509 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3510 {
3511 LIST_HEAD(completions);
3512 struct lpfc_iocbq *iocb, *next_iocb;
3513
3514 if (pring->ringno == LPFC_ELS_RING) {
3515 lpfc_fabric_abort_hba(phba);
3516 }
3517
3518 /* Error everything on txq and txcmplq
3519 * First do the txq.
3520 */
3521 spin_lock_irq(&phba->hbalock);
3522 list_splice_init(&pring->txq, &completions);
3523
3524 /* Next issue ABTS for everything on the txcmplq */
3525 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3526 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3527
3528 spin_unlock_irq(&phba->hbalock);
3529
3530 /* Cancel all the IOCBs from the completions list */
3531 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3532 IOERR_SLI_ABORTED);
3533 }
3534
3535 /**
3536 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3537 * @phba: Pointer to HBA context object.
3538 *
3539 * This function flushes all iocbs in the fcp ring and frees all the iocb
3540 * objects in txq and txcmplq. This function will not issue abort iocbs
3541 * for all the iocb commands in txcmplq; they will just be returned with
3542 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
3543 * slot has been permanently disabled.
3544 **/
3545 void
3546 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3547 {
3548 LIST_HEAD(txq);
3549 LIST_HEAD(txcmplq);
3550 struct lpfc_sli *psli = &phba->sli;
3551 struct lpfc_sli_ring *pring;
3552
3553 /* Currently, only one fcp ring */
3554 pring = &psli->ring[psli->fcp_ring];
3555
3556 spin_lock_irq(&phba->hbalock);
3557 /* Retrieve everything on txq */
3558 list_splice_init(&pring->txq, &txq);
3559
3560 /* Retrieve everything on the txcmplq */
3561 list_splice_init(&pring->txcmplq, &txcmplq);
3562
3563 /* Indicate the I/O queues are flushed */
3564 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3565 spin_unlock_irq(&phba->hbalock);
3566
3567 /* Flush the txq */
3568 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3569 IOERR_SLI_DOWN);
3570
3571 /* Flush the txcmpq */
3572 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3573 IOERR_SLI_DOWN);
3574 }
3575
3576 /**
3577 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3578 * @phba: Pointer to HBA context object.
3579 * @mask: Bit mask to be checked.
3580 *
3581 * This function reads the host status register and compares
3582 * with the provided bit mask to check if HBA completed
3583 * the restart. This function will wait in a loop for the
3584 * HBA to complete restart. If the HBA does not restart within
3585 * 15 iterations, the function will reset the HBA again. The
3586 * function returns 1 when the HBA fails to restart, otherwise returns
3587 * zero.
3588 **/
3589 static int
3590 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3591 {
3592 uint32_t status;
3593 int i = 0;
3594 int retval = 0;
3595
3596 /* Read the HBA Host Status Register */
3597 if (lpfc_readl(phba->HSregaddr, &status))
3598 return 1;
3599
3600 /*
3601 * Check status register every 100ms for 5 retries, then every
3602 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3603 * every 2.5 sec for 4.
3604 * Break out of the loop if errors occurred during init.
3605 */
3606 while (((status & mask) != mask) &&
3607 !(status & HS_FFERM) &&
3608 i++ < 20) {
3609
3610 if (i <= 5)
3611 msleep(10);
3612 else if (i <= 10)
3613 msleep(500);
3614 else
3615 msleep(2500);
3616
3617 if (i == 15) {
3618 /* Do post */
3619 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3620 lpfc_sli_brdrestart(phba);
3621 }
3622 /* Read the HBA Host Status Register */
3623 if (lpfc_readl(phba->HSregaddr, &status)) {
3624 retval = 1;
3625 break;
3626 }
3627 }
3628
3629 /* Check to see if any errors occurred during init */
3630 if ((status & HS_FFERM) || (i >= 20)) {
3631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3632 "2751 Adapter failed to restart, "
3633 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3634 status,
3635 readl(phba->MBslimaddr + 0xa8),
3636 readl(phba->MBslimaddr + 0xac));
3637 phba->link_state = LPFC_HBA_ERROR;
3638 retval = 1;
3639 }
3640
3641 return retval;
3642 }
3643
3644 /**
3645 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3646 * @phba: Pointer to HBA context object.
3647 * @mask: Bit mask to be checked.
3648 *
3649 * This function checks the host status register to see if the HBA is
3650 * ready. This function will wait in a loop for the HBA to be ready.
3651 * If the HBA is not ready, the function will reset the HBA PCI
3652 * function again. The function returns 1 when the HBA fails to be ready,
3653 * otherwise returns zero.
3654 **/
3655 static int
3656 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3657 {
3658 uint32_t status;
3659 int retval = 0;
3660
3661 /* Read the HBA Host Status Register */
3662 status = lpfc_sli4_post_status_check(phba);
3663
3664 if (status) {
3665 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3666 lpfc_sli_brdrestart(phba);
3667 status = lpfc_sli4_post_status_check(phba);
3668 }
3669
3670 /* Check to see if any errors occurred during init */
3671 if (status) {
3672 phba->link_state = LPFC_HBA_ERROR;
3673 retval = 1;
3674 } else
3675 phba->sli4_hba.intr_enable = 0;
3676
3677 return retval;
3678 }
3679
3680 /**
3681 * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
3682 * @phba: Pointer to HBA context object.
3683 * @mask: Bit mask to be checked.
3684 *
3685 * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine
3686 * from the API jump table function pointer from the lpfc_hba struct.
3687 **/
3688 int
3689 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3690 {
3691 return phba->lpfc_sli_brdready(phba, mask);
3692 }
3693
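/* Scratch pattern lpfc_reset_barrier() below uses to handshake with the port */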
3694 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3695
3696 /**
3697 * lpfc_reset_barrier - Make HBA ready for HBA reset
3698 * @phba: Pointer to HBA context object.
3699 *
3700 * This function is called before resetting an HBA. It is called with the
3701 * hbalock held and requests the HBA to quiesce DMA before the reset.
3702 **/
3703 void lpfc_reset_barrier(struct lpfc_hba *phba)
3704 {
3705 uint32_t __iomem *resp_buf;
3706 uint32_t __iomem *mbox_buf;
3707 volatile uint32_t mbox;
3708 uint32_t hc_copy, ha_copy, resp_data;
3709 int i;
3710 uint8_t hdrtype;
3711
3712 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3713 if (hdrtype != 0x80 ||
3714 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3715 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3716 return;
3717
3718 /*
3719 * Tell the other part of the chip to temporarily suspend all
3720 * its DMA activity.
3721 */
3722 resp_buf = phba->MBslimaddr;
3723
3724 /* Disable the error attention */
3725 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3726 return;
3727 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3728 readl(phba->HCregaddr); /* flush */
3729 phba->link_flag |= LS_IGNORE_ERATT;
3730
3731 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3732 return;
3733 if (ha_copy & HA_ERATT) {
3734 /* Clear Chip error bit */
3735 writel(HA_ERATT, phba->HAregaddr);
3736 phba->pport->stopped = 1;
3737 }
3738
3739 mbox = 0;
3740 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3741 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3742
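	/*
	 * Write the test pattern into the second SLIM word and post the
	 * barrier (KILL_BOARD, chip-owned) mailbox into the first word.
	 * The port is expected to acknowledge by overwriting the pattern
	 * with its one's complement once it has acted on the barrier.
	 */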
3743 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3744 mbox_buf = phba->MBslimaddr;
3745 writel(mbox, mbox_buf);
3746
3747 for (i = 0; i < 50; i++) {
3748 if (lpfc_readl((resp_buf + 1), &resp_data))
3749 return;
3750 if (resp_data != ~(BARRIER_TEST_PATTERN))
3751 mdelay(1);
3752 else
3753 break;
3754 }
3755 resp_data = 0;
3756 if (lpfc_readl((resp_buf + 1), &resp_data))
3757 return;
3758 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
3759 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3760 phba->pport->stopped)
3761 goto restore_hc;
3762 else
3763 goto clear_errat;
3764 }
3765
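	/*
	 * Wait for the port to hand the mailbox back: poll the first SLIM
	 * word until it matches the barrier command word with the owner
	 * bit set to OWN_HOST.
	 */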
3766 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3767 resp_data = 0;
3768 for (i = 0; i < 500; i++) {
3769 if (lpfc_readl(resp_buf, &resp_data))
3770 return;
3771 if (resp_data != mbox)
3772 mdelay(1);
3773 else
3774 break;
3775 }
3776
3777 clear_errat:
3778
3779 while (++i < 500) {
3780 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3781 return;
3782 if (!(ha_copy & HA_ERATT))
3783 mdelay(1);
3784 else
3785 break;
3786 }
3787
3788 if (readl(phba->HAregaddr) & HA_ERATT) {
3789 writel(HA_ERATT, phba->HAregaddr);
3790 phba->pport->stopped = 1;
3791 }
3792
3793 restore_hc:
3794 phba->link_flag &= ~LS_IGNORE_ERATT;
3795 writel(hc_copy, phba->HCregaddr);
3796 readl(phba->HCregaddr); /* flush */
3797 }
3798
3799 /**
3800 * lpfc_sli_brdkill - Issue a kill_board mailbox command
3801 * @phba: Pointer to HBA context object.
3802 *
3803 * This function issues a kill_board mailbox command and waits for
3804 * the error attention interrupt. This function is called for stopping
3805 * the firmware processing. The caller is not required to hold any
3806 * locks. This function calls the lpfc_hba_down_post function to free
3807 * any pending commands after the kill. The function will return 1 when it
3808 * fails to kill the board, else it will return 0.
3809 **/
3810 int
3811 lpfc_sli_brdkill(struct lpfc_hba *phba)
3812 {
3813 struct lpfc_sli *psli;
3814 LPFC_MBOXQ_t *pmb;
3815 uint32_t status;
3816 uint32_t ha_copy;
3817 int retval;
3818 int i = 0;
3819
3820 psli = &phba->sli;
3821
3822 /* Kill HBA */
3823 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3824 "0329 Kill HBA Data: x%x x%x\n",
3825 phba->pport->port_state, psli->sli_flag);
3826
3827 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3828 if (!pmb)
3829 return 1;
3830
3831 /* Disable the error attention */
3832 spin_lock_irq(&phba->hbalock);
3833 if (lpfc_readl(phba->HCregaddr, &status)) {
3834 spin_unlock_irq(&phba->hbalock);
3835 mempool_free(pmb, phba->mbox_mem_pool);
3836 return 1;
3837 }
3838 status &= ~HC_ERINT_ENA;
3839 writel(status, phba->HCregaddr);
3840 readl(phba->HCregaddr); /* flush */
3841 phba->link_flag |= LS_IGNORE_ERATT;
3842 spin_unlock_irq(&phba->hbalock);
3843
3844 lpfc_kill_board(phba, pmb);
3845 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3846 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3847
3848 if (retval != MBX_SUCCESS) {
3849 if (retval != MBX_BUSY)
3850 mempool_free(pmb, phba->mbox_mem_pool);
3851 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3852 "2752 KILL_BOARD command failed retval %d\n",
3853 retval);
3854 spin_lock_irq(&phba->hbalock);
3855 phba->link_flag &= ~LS_IGNORE_ERATT;
3856 spin_unlock_irq(&phba->hbalock);
3857 return 1;
3858 }
3859
3860 spin_lock_irq(&phba->hbalock);
3861 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3862 spin_unlock_irq(&phba->hbalock);
3863
3864 mempool_free(pmb, phba->mbox_mem_pool);
3865
3866 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3867 * attention every 100ms for 3 seconds. If we don't get ERATT after
3868 * 3 seconds we still set HBA_ERROR state because the status of the
3869 * board is now undefined.
3870 */
3871 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3872 return 1;
3873 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3874 mdelay(100);
3875 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3876 return 1;
3877 }
3878
3879 del_timer_sync(&psli->mbox_tmo);
3880 if (ha_copy & HA_ERATT) {
3881 writel(HA_ERATT, phba->HAregaddr);
3882 phba->pport->stopped = 1;
3883 }
3884 spin_lock_irq(&phba->hbalock);
3885 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3886 psli->mbox_active = NULL;
3887 phba->link_flag &= ~LS_IGNORE_ERATT;
3888 spin_unlock_irq(&phba->hbalock);
3889
3890 lpfc_hba_down_post(phba);
3891 phba->link_state = LPFC_HBA_ERROR;
3892
3893 return ha_copy & HA_ERATT ? 0 : 1;
3894 }
3895
3896 /**
3897 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
3898 * @phba: Pointer to HBA context object.
3899 *
3900 * This function resets the HBA by writing HC_INITFF to the control
3901 * register. After the HBA resets, this function resets all the iocb ring
3902 * indices. This function disables PCI layer parity checking during
3903 * the reset.
3904 * This function returns 0 always.
3905 * The caller is not required to hold any locks.
3906 **/
3907 int
3908 lpfc_sli_brdreset(struct lpfc_hba *phba)
3909 {
3910 struct lpfc_sli *psli;
3911 struct lpfc_sli_ring *pring;
3912 uint16_t cfg_value;
3913 int i;
3914
3915 psli = &phba->sli;
3916
3917 /* Reset HBA */
3918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3919 "0325 Reset HBA Data: x%x x%x\n",
3920 phba->pport->port_state, psli->sli_flag);
3921
3922 /* perform board reset */
3923 phba->fc_eventTag = 0;
3924 phba->link_events = 0;
3925 phba->pport->fc_myDID = 0;
3926 phba->pport->fc_prevDID = 0;
3927
3928 /* Turn off parity checking and serr during the physical reset */
3929 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3930 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3931 (cfg_value &
3932 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3933
3934 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3935
3936 /* Now toggle INITFF bit in the Host Control Register */
3937 writel(HC_INITFF, phba->HCregaddr);
3938 mdelay(1);
3939 readl(phba->HCregaddr); /* flush */
3940 writel(0, phba->HCregaddr);
3941 readl(phba->HCregaddr); /* flush */
3942
3943 /* Restore PCI cmd register */
3944 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3945
3946 /* Initialize relevant SLI info */
3947 for (i = 0; i < psli->num_rings; i++) {
3948 pring = &psli->ring[i];
3949 pring->flag = 0;
3950 pring->sli.sli3.rspidx = 0;
3951 pring->sli.sli3.next_cmdidx = 0;
3952 pring->sli.sli3.local_getidx = 0;
3953 pring->sli.sli3.cmdidx = 0;
3954 pring->missbufcnt = 0;
3955 }
3956
3957 phba->link_state = LPFC_WARM_START;
3958 return 0;
3959 }
3960
3961 /**
3962 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3963 * @phba: Pointer to HBA context object.
3964 *
3965 * This function resets a SLI4 HBA. This function disables PCI layer parity
3966 * checking while resetting the device. The caller is not required to hold
3967 * any locks.
3968 *
3969 * This function returns 0 always.
3970 **/
3971 int
3972 lpfc_sli4_brdreset(struct lpfc_hba *phba)
3973 {
3974 struct lpfc_sli *psli = &phba->sli;
3975 uint16_t cfg_value;
3976 int rc;
3977
3978 /* Reset HBA */
3979 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3980 "0295 Reset HBA Data: x%x x%x\n",
3981 phba->pport->port_state, psli->sli_flag);
3982
3983 /* perform board reset */
3984 phba->fc_eventTag = 0;
3985 phba->link_events = 0;
3986 phba->pport->fc_myDID = 0;
3987 phba->pport->fc_prevDID = 0;
3988
3989 spin_lock_irq(&phba->hbalock);
3990 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3991 phba->fcf.fcf_flag = 0;
3992 spin_unlock_irq(&phba->hbalock);
3993
3994 /* Now physically reset the device */
3995 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3996 "0389 Performing PCI function reset!\n");
3997
3998 /* Turn off parity checking and serr during the physical reset */
3999 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4000 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4001 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4002
4003 /* Perform FCoE PCI function reset before freeing queue memory */
4004 rc = lpfc_pci_function_reset(phba);
4005 lpfc_sli4_queue_destroy(phba);
4006
4007 /* Restore PCI cmd register */
4008 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4009
4010 return rc;
4011 }
4012
4013 /**
4014 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4015 * @phba: Pointer to HBA context object.
4016 *
4017 * This function is called in the SLI initialization code path to
4018 * restart the HBA. The caller is not required to hold any lock.
4019 * This function writes MBX_RESTART mailbox command to the SLIM and
4020 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4021 * function to free any pending commands. The function enables
4022 * POST only during the first initialization. The function returns zero.
4023 * The function does not guarantee completion of MBX_RESTART mailbox
4024 * command before the return of this function.
4025 **/
4026 static int
4027 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4028 {
4029 MAILBOX_t *mb;
4030 struct lpfc_sli *psli;
4031 volatile uint32_t word0;
4032 void __iomem *to_slim;
4033 uint32_t hba_aer_enabled;
4034
4035 spin_lock_irq(&phba->hbalock);
4036
4037 /* Take PCIe device Advanced Error Reporting (AER) state */
4038 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4039
4040 psli = &phba->sli;
4041
4042 /* Restart HBA */
4043 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4044 "0337 Restart HBA Data: x%x x%x\n",
4045 phba->pport->port_state, psli->sli_flag);
4046
4047 word0 = 0;
4048 mb = (MAILBOX_t *) &word0;
4049 mb->mbxCommand = MBX_RESTART;
4050 mb->mbxHc = 1;
4051
4052 lpfc_reset_barrier(phba);
4053
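	/*
	 * The MBX_RESTART command is built in a local word and written
	 * directly into the SLIM mailbox area rather than going through
	 * the normal mailbox submission path.
	 */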
4054 to_slim = phba->MBslimaddr;
4055 writel(*(uint32_t *) mb, to_slim);
4056 readl(to_slim); /* flush */
4057
4058 /* Only skip post after fc_ffinit is completed */
4059 if (phba->pport->port_state)
4060 word0 = 1; /* This is really setting up word1 */
4061 else
4062 word0 = 0; /* This is really setting up word1 */
4063 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4064 writel(*(uint32_t *) mb, to_slim);
4065 readl(to_slim); /* flush */
4066
4067 lpfc_sli_brdreset(phba);
4068 phba->pport->stopped = 0;
4069 phba->link_state = LPFC_INIT_START;
4070 phba->hba_flag = 0;
4071 spin_unlock_irq(&phba->hbalock);
4072
4073 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4074 psli->stats_start = get_seconds();
4075
4076 /* Give the INITFF and Post time to settle. */
4077 mdelay(100);
4078
4079 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4080 if (hba_aer_enabled)
4081 pci_disable_pcie_error_reporting(phba->pcidev);
4082
4083 lpfc_hba_down_post(phba);
4084
4085 return 0;
4086 }
4087
4088 /**
4089 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4090 * @phba: Pointer to HBA context object.
4091 *
4092 * This function is called in the SLI initialization code path to restart
4093 * a SLI4 HBA. The caller is not required to hold any lock.
4094 * At the end of the function, it calls lpfc_hba_down_post function to
4095 * free any pending commands.
4096 **/
4097 static int
4098 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4099 {
4100 struct lpfc_sli *psli = &phba->sli;
4101 uint32_t hba_aer_enabled;
4102 int rc;
4103
4104 /* Restart HBA */
4105 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4106 "0296 Restart HBA Data: x%x x%x\n",
4107 phba->pport->port_state, psli->sli_flag);
4108
4109 /* Take PCIe device Advanced Error Reporting (AER) state */
4110 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4111
4112 rc = lpfc_sli4_brdreset(phba);
4113
4114 spin_lock_irq(&phba->hbalock);
4115 phba->pport->stopped = 0;
4116 phba->link_state = LPFC_INIT_START;
4117 phba->hba_flag = 0;
4118 spin_unlock_irq(&phba->hbalock);
4119
4120 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4121 psli->stats_start = get_seconds();
4122
4123 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4124 if (hba_aer_enabled)
4125 pci_disable_pcie_error_reporting(phba->pcidev);
4126
4127 lpfc_hba_down_post(phba);
4128
4129 return rc;
4130 }
4131
4132 /**
4133 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4134 * @phba: Pointer to HBA context object.
4135 *
4136 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4137 * API jump table function pointer from the lpfc_hba struct.
4138 **/
4139 int
4140 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4141 {
4142 return phba->lpfc_sli_brdrestart(phba);
4143 }
4144
4145 /**
4146 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4147 * @phba: Pointer to HBA context object.
4148 *
4149 * This function is called after a HBA restart to wait for successful
4150 * restart of the HBA. Successful restart of the HBA is indicated by
4151 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4152 * iterations, the function will restart the HBA again. The function returns
4153 * zero if HBA successfully restarted else returns negative error code.
4154 **/
4155 static int
4156 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4157 {
4158 uint32_t status, i = 0;
4159
4160 /* Read the HBA Host Status Register */
4161 if (lpfc_readl(phba->HSregaddr, &status))
4162 return -EIO;
4163
4164 /* Check status register to see what current state is */
4165 i = 0;
4166 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4167
4168 /* Check every 10ms for 10 retries, then every 100ms for 90
4169 * retries, then every 1 sec for 50 retries, for a total of
4170 * ~60 seconds before resetting the board again and checking
4171 * every 1 sec for 50 more retries. The up-to-60-second wait
4172 * for board ready is required for Falcon FIPS zeroization to
4173 * complete; any board reset in between would restart the
4174 * zeroization and further delay board readiness.
4175 */
4176 if (i++ >= 200) {
4177 /* Adapter failed to init, timeout, status reg
4178 <status> */
4179 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4180 "0436 Adapter failed to init, "
4181 "timeout, status reg x%x, "
4182 "FW Data: A8 x%x AC x%x\n", status,
4183 readl(phba->MBslimaddr + 0xa8),
4184 readl(phba->MBslimaddr + 0xac));
4185 phba->link_state = LPFC_HBA_ERROR;
4186 return -ETIMEDOUT;
4187 }
4188
4189 /* Check to see if any errors occurred during init */
4190 if (status & HS_FFERM) {
4191 /* ERROR: During chipset initialization */
4192 /* Adapter failed to init, chipset, status reg
4193 <status> */
4194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4195 "0437 Adapter failed to init, "
4196 "chipset, status reg x%x, "
4197 "FW Data: A8 x%x AC x%x\n", status,
4198 readl(phba->MBslimaddr + 0xa8),
4199 readl(phba->MBslimaddr + 0xac));
4200 phba->link_state = LPFC_HBA_ERROR;
4201 return -EIO;
4202 }
4203
4204 if (i <= 10)
4205 msleep(10);
4206 else if (i <= 100)
4207 msleep(100);
4208 else
4209 msleep(1000);
4210
4211 if (i == 150) {
4212 /* Do post */
4213 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4214 lpfc_sli_brdrestart(phba);
4215 }
4216 /* Read the HBA Host Status Register */
4217 if (lpfc_readl(phba->HSregaddr, &status))
4218 return -EIO;
4219 }
4220
4221 /* Check to see if any errors occurred during init */
4222 if (status & HS_FFERM) {
4223 /* ERROR: During chipset initialization */
4224 /* Adapter failed to init, chipset, status reg <status> */
4225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4226 "0438 Adapter failed to init, chipset, "
4227 "status reg x%x, "
4228 "FW Data: A8 x%x AC x%x\n", status,
4229 readl(phba->MBslimaddr + 0xa8),
4230 readl(phba->MBslimaddr + 0xac));
4231 phba->link_state = LPFC_HBA_ERROR;
4232 return -EIO;
4233 }
4234
4235 /* Clear all interrupt enable conditions */
4236 writel(0, phba->HCregaddr);
4237 readl(phba->HCregaddr); /* flush */
4238
4239 /* setup host attn register */
4240 writel(0xffffffff, phba->HAregaddr);
4241 readl(phba->HAregaddr); /* flush */
4242 return 0;
4243 }
4244
4245 /**
4246 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4247 *
4248 * This function calculates and returns the number of HBQs required to be
4249 * configured.
4250 **/
4251 int
4252 lpfc_sli_hbq_count(void)
4253 {
4254 return ARRAY_SIZE(lpfc_hbq_defs);
4255 }
4256
4257 /**
4258 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4259 *
4260 * This function adds the number of hbq entries in every HBQ to get
4261 * the total number of hbq entries required for the HBA and returns
4262 * the total count.
4263 **/
4264 static int
4265 lpfc_sli_hbq_entry_count(void)
4266 {
4267 int hbq_count = lpfc_sli_hbq_count();
4268 int count = 0;
4269 int i;
4270
4271 for (i = 0; i < hbq_count; ++i)
4272 count += lpfc_hbq_defs[i]->entry_count;
4273 return count;
4274 }
4275
4276 /**
4277 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4278 *
4279 * This function calculates amount of memory required for all hbq entries
4280 * to be configured and returns the total memory required.
4281 **/
4282 int
4283 lpfc_sli_hbq_size(void)
4284 {
4285 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4286 }
4287
4288 /**
4289 * lpfc_sli_hbq_setup - configure and initialize HBQs
4290 * @phba: Pointer to HBA context object.
4291 *
4292 * This function is called during the SLI initialization to configure
4293 * all the HBQs and post buffers to the HBQ. The caller is not
4294 * required to hold any locks. This function will return zero if successful
4295 * else it will return negative error code.
4296 **/
4297 static int
4298 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4299 {
4300 int hbq_count = lpfc_sli_hbq_count();
4301 LPFC_MBOXQ_t *pmb;
4302 MAILBOX_t *pmbox;
4303 uint32_t hbqno;
4304 uint32_t hbq_entry_index;
4305
4306 /* Get a Mailbox buffer to setup mailbox
4307 * commands for HBA initialization
4308 */
4309 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4310
4311 if (!pmb)
4312 return -ENOMEM;
4313
4314 pmbox = &pmb->u.mb;
4315
4316 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4317 phba->link_state = LPFC_INIT_MBX_CMDS;
4318 phba->hbq_in_use = 1;
4319
4320 hbq_entry_index = 0;
4321 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4322 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4323 phba->hbqs[hbqno].hbqPutIdx = 0;
4324 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4325 phba->hbqs[hbqno].entry_count =
4326 lpfc_hbq_defs[hbqno]->entry_count;
4327 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4328 hbq_entry_index, pmb);
4329 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4330
4331 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4332 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4333 mbxStatus <status>, ring <num> */
4334
4335 lpfc_printf_log(phba, KERN_ERR,
4336 LOG_SLI | LOG_VPORT,
4337 "1805 Adapter failed to init. "
4338 "Data: x%x x%x x%x\n",
4339 pmbox->mbxCommand,
4340 pmbox->mbxStatus, hbqno);
4341
4342 phba->link_state = LPFC_HBA_ERROR;
4343 mempool_free(pmb, phba->mbox_mem_pool);
4344 return -ENXIO;
4345 }
4346 }
4347 phba->hbq_count = hbq_count;
4348
4349 mempool_free(pmb, phba->mbox_mem_pool);
4350
4351 /* Initially populate or replenish the HBQs */
4352 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4353 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4354 return 0;
4355 }
4356
4357 /**
4358 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4359 * @phba: Pointer to HBA context object.
4360 *
4361 * This function is called during the SLI initialization to configure
4362 * all the HBQs and post buffers to the HBQ. The caller is not
4363 * required to hold any locks. This function will return zero if successful
4364 * else it will return negative error code.
4365 **/
4366 static int
4367 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4368 {
4369 phba->hbq_in_use = 1;
4370 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4371 phba->hbq_count = 1;
4372 /* Initially populate or replenish the HBQs */
4373 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4374 return 0;
4375 }
4376
4377 /**
4378 * lpfc_sli_config_port - Issue config port mailbox command
4379 * @phba: Pointer to HBA context object.
4380 * @sli_mode: sli mode - 2/3
4381 *
4382 * This function is called by the SLI initialization code path
4383 * to issue the config_port mailbox command. This function restarts the
4384 * HBA firmware and issues a config_port mailbox command to configure
4385 * the SLI interface in the sli mode specified by the sli_mode
4386 * variable. The caller is not required to hold any locks.
4387 * The function returns 0 if successful, else returns negative error
4388 * code.
4389 **/
4390 int
4391 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4392 {
4393 LPFC_MBOXQ_t *pmb;
4394 uint32_t resetcount = 0, rc = 0, done = 0;
4395
4396 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4397 if (!pmb) {
4398 phba->link_state = LPFC_HBA_ERROR;
4399 return -ENOMEM;
4400 }
4401
4402 phba->sli_rev = sli_mode;
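	/*
	 * Try CONFIG_PORT at most twice: each pass restarts the HBA and
	 * re-initializes the chipset before issuing the mailbox command.
	 */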
4403 while (resetcount < 2 && !done) {
4404 spin_lock_irq(&phba->hbalock);
4405 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4406 spin_unlock_irq(&phba->hbalock);
4407 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4408 lpfc_sli_brdrestart(phba);
4409 rc = lpfc_sli_chipset_init(phba);
4410 if (rc)
4411 break;
4412
4413 spin_lock_irq(&phba->hbalock);
4414 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4415 spin_unlock_irq(&phba->hbalock);
4416 resetcount++;
4417
4418 /* Call pre CONFIG_PORT mailbox command initialization. A
4419 * value of 0 means the call was successful. Any other
4420 * nonzero value is a failure, but if ERESTART is returned,
4421 * the driver may reset the HBA and try again.
4422 */
4423 rc = lpfc_config_port_prep(phba);
4424 if (rc == -ERESTART) {
4425 phba->link_state = LPFC_LINK_UNKNOWN;
4426 continue;
4427 } else if (rc)
4428 break;
4429
4430 phba->link_state = LPFC_INIT_MBX_CMDS;
4431 lpfc_config_port(phba, pmb);
4432 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4433 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4434 LPFC_SLI3_HBQ_ENABLED |
4435 LPFC_SLI3_CRP_ENABLED |
4436 LPFC_SLI3_BG_ENABLED |
4437 LPFC_SLI3_DSS_ENABLED);
4438 if (rc != MBX_SUCCESS) {
4439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4440 "0442 Adapter failed to init, mbxCmd x%x "
4441 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4442 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4443 spin_lock_irq(&phba->hbalock);
4444 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4445 spin_unlock_irq(&phba->hbalock);
4446 rc = -ENXIO;
4447 } else {
4448 /* Allow asynchronous mailbox command to go through */
4449 spin_lock_irq(&phba->hbalock);
4450 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4451 spin_unlock_irq(&phba->hbalock);
4452 done = 1;
4453
4454 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4455 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4457 "3110 Port did not grant ASABT\n");
4458 }
4459 }
4460 if (!done) {
4461 rc = -EINVAL;
4462 goto do_prep_failed;
4463 }
4464 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4465 if (!pmb->u.mb.un.varCfgPort.cMA) {
4466 rc = -ENXIO;
4467 goto do_prep_failed;
4468 }
4469 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4470 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4471 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4472 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4473 phba->max_vpi : phba->max_vports;
4474
4475 } else
4476 phba->max_vpi = 0;
4477 phba->fips_level = 0;
4478 phba->fips_spec_rev = 0;
4479 if (pmb->u.mb.un.varCfgPort.gdss) {
4480 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4481 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4482 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4483 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4484 "2850 Security Crypto Active. FIPS x%d "
4485 "(Spec Rev: x%d)",
4486 phba->fips_level, phba->fips_spec_rev);
4487 }
4488 if (pmb->u.mb.un.varCfgPort.sec_err) {
4489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4490 "2856 Config Port Security Crypto "
4491 "Error: x%x ",
4492 pmb->u.mb.un.varCfgPort.sec_err);
4493 }
4494 if (pmb->u.mb.un.varCfgPort.gerbm)
4495 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4496 if (pmb->u.mb.un.varCfgPort.gcrp)
4497 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4498
4499 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4500 phba->port_gp = phba->mbox->us.s3_pgp.port;
4501
4502 if (phba->cfg_enable_bg) {
4503 if (pmb->u.mb.un.varCfgPort.gbg)
4504 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4505 else
4506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4507 "0443 Adapter did not grant "
4508 "BlockGuard\n");
4509 }
4510 } else {
4511 phba->hbq_get = NULL;
4512 phba->port_gp = phba->mbox->us.s2.port;
4513 phba->max_vpi = 0;
4514 }
4515 do_prep_failed:
4516 mempool_free(pmb, phba->mbox_mem_pool);
4517 return rc;
4518 }
4519
4520
4521 /**
4522 * lpfc_sli_hba_setup - SLI initialization function
4523 * @phba: Pointer to HBA context object.
4524 *
4525 * This function is the main SLI initialization function. This function
4526 * is called by the HBA initialization code, HBA reset code and HBA
4527 * error attention handler code. Caller is not required to hold any
4528 * locks. This function issues config_port mailbox command to configure
4529 * the SLI, setup iocb rings and HBQ rings. In the end the function
4530 * calls the config_port_post function to issue init_link mailbox
4531 * command and to start the discovery. The function will return zero
4532 * if successful, else it will return negative error code.
4533 **/
4534 int
4535 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4536 {
4537 uint32_t rc;
4538 int mode = 3, i;
4539 int longs;
4540
4541 switch (lpfc_sli_mode) {
4542 case 2:
4543 if (phba->cfg_enable_npiv) {
4544 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4545 "1824 NPIV enabled: Override lpfc_sli_mode "
4546 "parameter (%d) to auto (0).\n",
4547 lpfc_sli_mode);
4548 break;
4549 }
4550 mode = 2;
4551 break;
4552 case 0:
4553 case 3:
4554 break;
4555 default:
4556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4557 "1819 Unrecognized lpfc_sli_mode "
4558 "parameter: %d.\n", lpfc_sli_mode);
4559
4560 break;
4561 }
4562
4563 rc = lpfc_sli_config_port(phba, mode);
4564
4565 if (rc && lpfc_sli_mode == 3)
4566 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4567 "1820 Unable to select SLI-3. "
4568 "Not supported by adapter.\n");
4569 if (rc && mode != 2)
4570 rc = lpfc_sli_config_port(phba, 2);
4571 if (rc)
4572 goto lpfc_sli_hba_setup_error;
4573
4574 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4575 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4576 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4577 if (!rc) {
4578 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4579 "2709 This device supports "
4580 "Advanced Error Reporting (AER)\n");
4581 spin_lock_irq(&phba->hbalock);
4582 phba->hba_flag |= HBA_AER_ENABLED;
4583 spin_unlock_irq(&phba->hbalock);
4584 } else {
4585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4586 "2708 This device does not support "
4587 "Advanced Error Reporting (AER)\n");
4588 phba->cfg_aer_support = 0;
4589 }
4590 }
4591
4592 if (phba->sli_rev == 3) {
4593 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4594 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4595 } else {
4596 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4597 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4598 phba->sli3_options = 0;
4599 }
4600
4601 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4602 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4603 phba->sli_rev, phba->max_vpi);
4604 rc = lpfc_sli_ring_map(phba);
4605
4606 if (rc)
4607 goto lpfc_sli_hba_setup_error;
4608
4609 /* Initialize VPIs. */
4610 if (phba->sli_rev == LPFC_SLI_REV3) {
4611 /*
4612 * The VPI bitmask and physical ID array are allocated
4613 * and initialized once only - at driver load. A port
4614 * reset doesn't need to reinitialize this memory.
4615 */
4616 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4617 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4618 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4619 GFP_KERNEL);
4620 if (!phba->vpi_bmask) {
4621 rc = -ENOMEM;
4622 goto lpfc_sli_hba_setup_error;
4623 }
4624
4625 phba->vpi_ids = kzalloc(
4626 (phba->max_vpi+1) * sizeof(uint16_t),
4627 GFP_KERNEL);
4628 if (!phba->vpi_ids) {
4629 kfree(phba->vpi_bmask);
4630 rc = -ENOMEM;
4631 goto lpfc_sli_hba_setup_error;
4632 }
4633 for (i = 0; i < phba->max_vpi; i++)
4634 phba->vpi_ids[i] = i;
4635 }
4636 }
4637
4638 /* Init HBQs */
4639 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4640 rc = lpfc_sli_hbq_setup(phba);
4641 if (rc)
4642 goto lpfc_sli_hba_setup_error;
4643 }
4644 spin_lock_irq(&phba->hbalock);
4645 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4646 spin_unlock_irq(&phba->hbalock);
4647
4648 rc = lpfc_config_port_post(phba);
4649 if (rc)
4650 goto lpfc_sli_hba_setup_error;
4651
4652 return rc;
4653
4654 lpfc_sli_hba_setup_error:
4655 phba->link_state = LPFC_HBA_ERROR;
4656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4657 "0445 Firmware initialization failed\n");
4658 return rc;
4659 }
4660
4661 /**
4662 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4663 * @phba: Pointer to HBA context object.
4664 * @mboxq: mailbox pointer.
4665 * This function issues a dump mailbox command to read config region
4666 * 23, parses the records in the region and populates the driver
4667 * data structure.
4668 **/
4669 static int
4670 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4671 {
4672 LPFC_MBOXQ_t *mboxq;
4673 struct lpfc_dmabuf *mp;
4674 struct lpfc_mqe *mqe;
4675 uint32_t data_length;
4676 int rc;
4677
4678 /* Program the default value of vlan_id and fc_map */
4679 phba->valid_vlan = 0;
4680 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4681 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4682 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4683
4684 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4685 if (!mboxq)
4686 return -ENOMEM;
4687
4688 mqe = &mboxq->u.mqe;
4689 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4690 rc = -ENOMEM;
4691 goto out_free_mboxq;
4692 }
4693
4694 mp = (struct lpfc_dmabuf *) mboxq->context1;
4695 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4696
4697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4698 "(%d):2571 Mailbox cmd x%x Status x%x "
4699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4700 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4701 "CQ: x%x x%x x%x x%x\n",
4702 mboxq->vport ? mboxq->vport->vpi : 0,
4703 bf_get(lpfc_mqe_command, mqe),
4704 bf_get(lpfc_mqe_status, mqe),
4705 mqe->un.mb_words[0], mqe->un.mb_words[1],
4706 mqe->un.mb_words[2], mqe->un.mb_words[3],
4707 mqe->un.mb_words[4], mqe->un.mb_words[5],
4708 mqe->un.mb_words[6], mqe->un.mb_words[7],
4709 mqe->un.mb_words[8], mqe->un.mb_words[9],
4710 mqe->un.mb_words[10], mqe->un.mb_words[11],
4711 mqe->un.mb_words[12], mqe->un.mb_words[13],
4712 mqe->un.mb_words[14], mqe->un.mb_words[15],
4713 mqe->un.mb_words[16], mqe->un.mb_words[50],
4714 mboxq->mcqe.word0,
4715 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4716 mboxq->mcqe.trailer);
4717
4718 if (rc) {
4719 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4720 kfree(mp);
4721 rc = -EIO;
4722 goto out_free_mboxq;
4723 }
4724 data_length = mqe->un.mb_words[5];
4725 if (data_length > DMP_RGN23_SIZE) {
4726 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4727 kfree(mp);
4728 rc = -EIO;
4729 goto out_free_mboxq;
4730 }
4731
4732 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4733 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4734 kfree(mp);
4735 rc = 0;
4736
4737 out_free_mboxq:
4738 mempool_free(mboxq, phba->mbox_mem_pool);
4739 return rc;
4740 }
4741
4742 /**
4743 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4744 * @phba: pointer to lpfc hba data structure.
4745 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4746 * @vpd: pointer to the memory to hold resulting port vpd data.
4747 * @vpd_size: On input, the number of bytes allocated to @vpd.
4748 * On output, the number of data bytes in @vpd.
4749 *
4750 * This routine executes a READ_REV SLI4 mailbox command. In
4751 * addition, this routine gets the port vpd data.
4752 *
4753 * Return codes
4754 * 0 - successful
4755 * -ENOMEM - could not allocate memory.
4756 **/
4757 static int
4758 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4759 uint8_t *vpd, uint32_t *vpd_size)
4760 {
4761 int rc = 0;
4762 uint32_t dma_size;
4763 struct lpfc_dmabuf *dmabuf;
4764 struct lpfc_mqe *mqe;
4765
4766 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4767 if (!dmabuf)
4768 return -ENOMEM;
4769
4770 /*
4771 * Get a DMA buffer for the vpd data resulting from the READ_REV
4772 * mailbox command.
4773 */
4774 dma_size = *vpd_size;
4775 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4776 dma_size,
4777 &dmabuf->phys,
4778 GFP_KERNEL);
4779 if (!dmabuf->virt) {
4780 kfree(dmabuf);
4781 return -ENOMEM;
4782 }
4783 memset(dmabuf->virt, 0, dma_size);
4784
4785 /*
4786 * The SLI4 implementation of READ_REV conflicts at word1,
4787 * bits 31:16 and SLI4 adds vpd functionality not present
4788 * in SLI3. This code corrects the conflicts.
4789 */
4790 lpfc_read_rev(phba, mboxq);
4791 mqe = &mboxq->u.mqe;
4792 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4793 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4794 mqe->un.read_rev.word1 &= 0x0000FFFF;
4795 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4796 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4797
4798 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4799 if (rc) {
4800 dma_free_coherent(&phba->pcidev->dev, dma_size,
4801 dmabuf->virt, dmabuf->phys);
4802 kfree(dmabuf);
4803 return -EIO;
4804 }
4805
4806 /*
4807 * The available vpd length cannot be bigger than the
4808 * DMA buffer passed to the port. Catch the less than
4809 * case and update the caller's size.
4810 */
4811 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4812 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4813
4814 memcpy(vpd, dmabuf->virt, *vpd_size);
4815
4816 dma_free_coherent(&phba->pcidev->dev, dma_size,
4817 dmabuf->virt, dmabuf->phys);
4818 kfree(dmabuf);
4819 return 0;
4820 }
4821
4822 /**
4823 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4824 * @phba: pointer to lpfc hba data structure.
4825 *
4826 * This routine retrieves the SLI4 device physical port name that this
4827 * PCI function is attached to.
4828 *
4829 * Return codes
4830 * 0 - successful
4831 * otherwise - failed to retrieve physical port name
4832 **/
4833 static int
4834 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4835 {
4836 LPFC_MBOXQ_t *mboxq;
4837 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4838 struct lpfc_controller_attribute *cntl_attr;
4839 struct lpfc_mbx_get_port_name *get_port_name;
4840 void *virtaddr = NULL;
4841 uint32_t alloclen, reqlen;
4842 uint32_t shdr_status, shdr_add_status;
4843 union lpfc_sli4_cfg_shdr *shdr;
4844 char cport_name = 0;
4845 int rc;
4846
4847 /* We assume nothing at this point */
4848 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4849 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4850
4851 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4852 if (!mboxq)
4853 return -ENOMEM;
4854 /* obtain link type and link number via READ_CONFIG */
4855 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4856 lpfc_sli4_read_config(phba);
4857 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4858 goto retrieve_ppname;
4859
4860 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4861 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4862 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4863 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4864 LPFC_SLI4_MBX_NEMBED);
4865 if (alloclen < reqlen) {
4866 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4867 "3084 Allocated DMA memory size (%d) is "
4868 "less than the requested DMA memory size "
4869 "(%d)\n", alloclen, reqlen);
4870 rc = -ENOMEM;
4871 goto out_free_mboxq;
4872 }
4873 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4874 virtaddr = mboxq->sge_array->addr[0];
4875 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4876 shdr = &mbx_cntl_attr->cfg_shdr;
4877 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4878 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4879 if (shdr_status || shdr_add_status || rc) {
4880 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4881 "3085 Mailbox x%x (x%x/x%x) failed, "
4882 "rc:x%x, status:x%x, add_status:x%x\n",
4883 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4884 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4885 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4886 rc, shdr_status, shdr_add_status);
4887 rc = -ENXIO;
4888 goto out_free_mboxq;
4889 }
4890 cntl_attr = &mbx_cntl_attr->cntl_attr;
4891 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4892 phba->sli4_hba.lnk_info.lnk_tp =
4893 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4894 phba->sli4_hba.lnk_info.lnk_no =
4895 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4896 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4897 "3086 lnk_type:%d, lnk_numb:%d\n",
4898 phba->sli4_hba.lnk_info.lnk_tp,
4899 phba->sli4_hba.lnk_info.lnk_no);
4900
4901 retrieve_ppname:
4902 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4903 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4904 sizeof(struct lpfc_mbx_get_port_name) -
4905 sizeof(struct lpfc_sli4_cfg_mhdr),
4906 LPFC_SLI4_MBX_EMBED);
4907 get_port_name = &mboxq->u.mqe.un.get_port_name;
4908 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4909 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4910 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4911 phba->sli4_hba.lnk_info.lnk_tp);
4912 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4913 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4914 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4915 if (shdr_status || shdr_add_status || rc) {
4916 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4917 "3087 Mailbox x%x (x%x/x%x) failed: "
4918 "rc:x%x, status:x%x, add_status:x%x\n",
4919 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4920 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4921 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4922 rc, shdr_status, shdr_add_status);
4923 rc = -ENXIO;
4924 goto out_free_mboxq;
4925 }
4926 switch (phba->sli4_hba.lnk_info.lnk_no) {
4927 case LPFC_LINK_NUMBER_0:
4928 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4929 &get_port_name->u.response);
4930 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4931 break;
4932 case LPFC_LINK_NUMBER_1:
4933 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4934 &get_port_name->u.response);
4935 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4936 break;
4937 case LPFC_LINK_NUMBER_2:
4938 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4939 &get_port_name->u.response);
4940 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4941 break;
4942 case LPFC_LINK_NUMBER_3:
4943 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4944 &get_port_name->u.response);
4945 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4946 break;
4947 default:
4948 break;
4949 }
4950
4951 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4952 phba->Port[0] = cport_name;
4953 phba->Port[1] = '\0';
4954 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4955 "3091 SLI get port name: %s\n", phba->Port);
4956 }
4957
4958 out_free_mboxq:
4959 if (rc != MBX_TIMEOUT) {
4960 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4961 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4962 else
4963 mempool_free(mboxq, phba->mbox_mem_pool);
4964 }
4965 return rc;
4966 }
4967
4968 /**
4969 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4970 * @phba: pointer to lpfc hba data structure.
4971 *
4972 * This routine is called to explicitly arm the SLI4 device's completion and
4973 * event queues.
4974 **/
4975 static void
4976 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4977 {
4978 int fcp_eqidx;
4979
4980 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4981 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4982 fcp_eqidx = 0;
4983 if (phba->sli4_hba.fcp_cq) {
4984 do {
4985 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4986 LPFC_QUEUE_REARM);
4987 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4988 }
4989 if (phba->sli4_hba.hba_eq) {
4990 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4991 fcp_eqidx++)
4992 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4993 LPFC_QUEUE_REARM);
4994 }
4995 }
4996
4997 /**
4998 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4999 * @phba: Pointer to HBA context object.
5000 * @type: The resource extent type.
5001 * @extnt_count: buffer to hold port available extent count.
5002 * @extnt_size: buffer to hold element count per extent.
5003 *
5004 * This function calls the port and retrieves the number of available
5005 * extents and their size for a particular extent type.
5006 *
5007 * Returns: 0 if successful. Nonzero otherwise.
5008 **/
5009 int
5010 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5011 uint16_t *extnt_count, uint16_t *extnt_size)
5012 {
5013 int rc = 0;
5014 uint32_t length;
5015 uint32_t mbox_tmo;
5016 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5017 LPFC_MBOXQ_t *mbox;
5018
5019 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5020 if (!mbox)
5021 return -ENOMEM;
5022
5023 /* Find out how many extents are available for this resource type */
5024 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5025 sizeof(struct lpfc_sli4_cfg_mhdr));
5026 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5027 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5028 length, LPFC_SLI4_MBX_EMBED);
5029
5030 /* Send an extents count of 0 - the GET doesn't use it. */
5031 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5032 LPFC_SLI4_MBX_EMBED);
5033 if (unlikely(rc)) {
5034 rc = -EIO;
5035 goto err_exit;
5036 }
5037
5038 if (!phba->sli4_hba.intr_enable)
5039 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5040 else {
5041 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5042 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5043 }
5044 if (unlikely(rc)) {
5045 rc = -EIO;
5046 goto err_exit;
5047 }
5048
5049 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5050 if (bf_get(lpfc_mbox_hdr_status,
5051 &rsrc_info->header.cfg_shdr.response)) {
5052 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5053 "2930 Failed to get resource extents "
5054 "Status 0x%x Add'l Status 0x%x\n",
5055 bf_get(lpfc_mbox_hdr_status,
5056 &rsrc_info->header.cfg_shdr.response),
5057 bf_get(lpfc_mbox_hdr_add_status,
5058 &rsrc_info->header.cfg_shdr.response));
5059 rc = -EIO;
5060 goto err_exit;
5061 }
5062
5063 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5064 &rsrc_info->u.rsp);
5065 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5066 &rsrc_info->u.rsp);
5067
5068 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5069 "3162 Retrieved extents type-%d from port: count:%d, "
5070 "size:%d\n", type, *extnt_count, *extnt_size);
5071
5072 err_exit:
5073 mempool_free(mbox, phba->mbox_mem_pool);
5074 return rc;
5075 }
5076
5077 /**
5078 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5079 * @phba: Pointer to HBA context object.
5080 * @type: The extent type to check.
5081 *
5082 * This function reads the current available extents from the port and checks
5083 * if the extent count or extent size has changed since the last access.
5084 * Callers use this routine post port reset to understand if there is an
5085 * extent reprovisioning requirement.
5086 *
5087 * Returns:
5088 * -Error: error indicates problem.
5089 * 1: Extent count or size has changed.
5090 * 0: No changes.
5091 **/
5092 static int
5093 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5094 {
5095 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5096 uint16_t size_diff, rsrc_ext_size;
5097 int rc = 0;
5098 struct lpfc_rsrc_blks *rsrc_entry;
5099 struct list_head *rsrc_blk_list = NULL;
5100
5101 size_diff = 0;
5102 curr_ext_cnt = 0;
5103 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5104 &rsrc_ext_cnt,
5105 &rsrc_ext_size);
5106 if (unlikely(rc))
5107 return -EIO;
5108
5109 switch (type) {
5110 case LPFC_RSC_TYPE_FCOE_RPI:
5111 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5112 break;
5113 case LPFC_RSC_TYPE_FCOE_VPI:
5114 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5115 break;
5116 case LPFC_RSC_TYPE_FCOE_XRI:
5117 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5118 break;
5119 case LPFC_RSC_TYPE_FCOE_VFI:
5120 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5121 break;
5122 default:
5123 break;
5124 }
5125
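/*
 * Walk the driver's cached extent blocks for this type, counting them
 * and noting any block whose size no longer matches what the port
 * reported above.
 */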
5126 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5127 curr_ext_cnt++;
5128 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5129 size_diff++;
5130 }
5131
5132 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5133 rc = 1;
5134
5135 return rc;
5136 }
5137
5138 /**
5139 * lpfc_sli4_cfg_post_extnts - Post an SLI4 resource extent allocation request.
5140 * @phba: Pointer to HBA context object.
5141 * @extnt_cnt: number of available extents.
5142 * @type: the extent type (rpi, xri, vfi, vpi).
5143 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5144 * @mbox: pointer to the caller's allocated mailbox structure.
5145 *
5146 * This function executes the extents allocation request. It also
5147 * takes care of the amount of memory needed to allocate or get the
5148 * allocated extents. It is the caller's responsibility to evaluate
5149 * the response.
5150 *
5151 * Returns:
5152 * -Error: Error value describes the condition found.
5153 * 0: if successful
5154 **/
5155 static int
5156 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5157 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5158 {
5159 int rc = 0;
5160 uint32_t req_len;
5161 uint32_t emb_len;
5162 uint32_t alloc_len, mbox_tmo;
5163
5164 /* Calculate the total requested length of the dma memory */
5165 req_len = extnt_cnt * sizeof(uint16_t);
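/* Each allocated extent returns a 16-bit base resource id in the response. */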
5166
5167 /*
5168 * Calculate the size of an embedded mailbox. The uint32_t
5169 * accounts for the extents-specific word.
5170 */
5171 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5172 sizeof(uint32_t);
5173
5174 /*
5175 * Presume the allocation and response will fit into an embedded
5176 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5177 */
5178 *emb = LPFC_SLI4_MBX_EMBED;
5179 if (req_len > emb_len) {
5180 req_len = extnt_cnt * sizeof(uint16_t) +
5181 sizeof(union lpfc_sli4_cfg_shdr) +
5182 sizeof(uint32_t);
5183 *emb = LPFC_SLI4_MBX_NEMBED;
5184 }
5185
5186 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5187 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5188 req_len, *emb);
5189 if (alloc_len < req_len) {
5190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5191 "2982 Allocated DMA memory size (x%x) is "
5192 "less than the requested DMA memory "
5193 "size (x%x)\n", alloc_len, req_len);
5194 return -ENOMEM;
5195 }
5196 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5197 if (unlikely(rc))
5198 return -EIO;
5199
5200 if (!phba->sli4_hba.intr_enable)
5201 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5202 else {
5203 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5204 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5205 }
5206
5207 if (unlikely(rc))
5208 rc = -EIO;
5209 return rc;
5210 }
5211
5212 /**
5213 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5214 * @phba: Pointer to HBA context object.
5215 * @type: The resource extent type to allocate.
5216 *
5217 * This function allocates the number of elements for the specified
5218 * resource type.
5219 **/
5220 static int
5221 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5222 {
5223 bool emb = false;
5224 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5225 uint16_t rsrc_id, rsrc_start, j, k;
5226 uint16_t *ids;
5227 int i, rc;
5228 unsigned long longs;
5229 unsigned long *bmask;
5230 struct lpfc_rsrc_blks *rsrc_blks;
5231 LPFC_MBOXQ_t *mbox;
5232 uint32_t length;
5233 struct lpfc_id_range *id_array = NULL;
5234 void *virtaddr = NULL;
5235 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5236 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5237 struct list_head *ext_blk_list;
5238
5239 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5240 &rsrc_cnt,
5241 &rsrc_size);
5242 if (unlikely(rc))
5243 return -EIO;
5244
5245 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5246 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5247 "3009 No available Resource Extents "
5248 "for resource type 0x%x: Count: 0x%x, "
5249 "Size 0x%x\n", type, rsrc_cnt,
5250 rsrc_size);
5251 return -ENOMEM;
5252 }
5253
5254 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5255 "2903 Post resource extents type-0x%x: "
5256 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5257
5258 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5259 if (!mbox)
5260 return -ENOMEM;
5261
5262 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5263 if (unlikely(rc)) {
5264 rc = -EIO;
5265 goto err_exit;
5266 }
5267
5268 /*
5269 * Figure out where the response is located. Then get local pointers
5270 * to the response data. The port does not guarantee to honor the full
5271 * requested extent count, so update the local variable with the count
5272 * actually allocated by the port.
5273 */
5274 if (emb == LPFC_SLI4_MBX_EMBED) {
5275 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5276 id_array = &rsrc_ext->u.rsp.id[0];
5277 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5278 } else {
5279 virtaddr = mbox->sge_array->addr[0];
5280 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5281 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5282 id_array = &n_rsrc->id;
5283 }
5284
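/*
 * Each of the rsrc_cnt extents provides rsrc_size consecutive resource
 * ids; size the availability bitmask (in longs) and the physical id
 * array to cover the full id count.
 */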
5285 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5286 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5287
5288 /*
5289 * Based on the resource size and count, correct the base and max
5290 * resource values.
5291 */
5292 length = sizeof(struct lpfc_rsrc_blks);
5293 switch (type) {
5294 case LPFC_RSC_TYPE_FCOE_RPI:
5295 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5296 sizeof(unsigned long),
5297 GFP_KERNEL);
5298 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5299 rc = -ENOMEM;
5300 goto err_exit;
5301 }
5302 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5303 sizeof(uint16_t),
5304 GFP_KERNEL);
5305 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5306 kfree(phba->sli4_hba.rpi_bmask);
5307 rc = -ENOMEM;
5308 goto err_exit;
5309 }
5310
5311 /*
5312 * The next_rpi was initialized with the maximum available
5313 * count but the port may allocate a smaller number. Catch
5314 * that case and update the next_rpi.
5315 */
5316 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5317
5318 /* Initialize local ptrs for common extent processing later. */
5319 bmask = phba->sli4_hba.rpi_bmask;
5320 ids = phba->sli4_hba.rpi_ids;
5321 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5322 break;
5323 case LPFC_RSC_TYPE_FCOE_VPI:
5324 phba->vpi_bmask = kzalloc(longs *
5325 sizeof(unsigned long),
5326 GFP_KERNEL);
5327 if (unlikely(!phba->vpi_bmask)) {
5328 rc = -ENOMEM;
5329 goto err_exit;
5330 }
5331 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5332 sizeof(uint16_t),
5333 GFP_KERNEL);
5334 if (unlikely(!phba->vpi_ids)) {
5335 kfree(phba->vpi_bmask);
5336 rc = -ENOMEM;
5337 goto err_exit;
5338 }
5339
5340 /* Initialize local ptrs for common extent processing later. */
5341 bmask = phba->vpi_bmask;
5342 ids = phba->vpi_ids;
5343 ext_blk_list = &phba->lpfc_vpi_blk_list;
5344 break;
5345 case LPFC_RSC_TYPE_FCOE_XRI:
5346 phba->sli4_hba.xri_bmask = kzalloc(longs *
5347 sizeof(unsigned long),
5348 GFP_KERNEL);
5349 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5350 rc = -ENOMEM;
5351 goto err_exit;
5352 }
5353 phba->sli4_hba.max_cfg_param.xri_used = 0;
5354 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5355 sizeof(uint16_t),
5356 GFP_KERNEL);
5357 if (unlikely(!phba->sli4_hba.xri_ids)) {
5358 kfree(phba->sli4_hba.xri_bmask);
5359 rc = -ENOMEM;
5360 goto err_exit;
5361 }
5362
5363 /* Initialize local ptrs for common extent processing later. */
5364 bmask = phba->sli4_hba.xri_bmask;
5365 ids = phba->sli4_hba.xri_ids;
5366 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5367 break;
5368 case LPFC_RSC_TYPE_FCOE_VFI:
5369 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5370 sizeof(unsigned long),
5371 GFP_KERNEL);
5372 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5373 rc = -ENOMEM;
5374 goto err_exit;
5375 }
5376 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5377 sizeof(uint16_t),
5378 GFP_KERNEL);
5379 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5380 kfree(phba->sli4_hba.vfi_bmask);
5381 rc = -ENOMEM;
5382 goto err_exit;
5383 }
5384
5385 /* Initialize local ptrs for common extent processing later. */
5386 bmask = phba->sli4_hba.vfi_bmask;
5387 ids = phba->sli4_hba.vfi_ids;
5388 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5389 break;
5390 default:
5391 /* Unsupported Opcode. Fail call. */
5392 id_array = NULL;
5393 bmask = NULL;
5394 ids = NULL;
5395 ext_blk_list = NULL;
5396 goto err_exit;
5397 }
5398
5399 /*
5400 * Complete initializing the extent configuration with the
5401 * allocated ids assigned to this function. The bitmask serves
5402 * as an index into the array and manages the available ids. The
5403 * array just stores the ids communicated to the port via the wqes.
5404 */
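/*
 * The response packs two 16-bit extent base ids per word: even
 * iterations read the low half-word, odd iterations the high half-word,
 * and k advances to the next word after each pair is consumed.
 */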
5405 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5406 if ((i % 2) == 0)
5407 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5408 &id_array[k]);
5409 else
5410 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5411 &id_array[k]);
5412
5413 rsrc_blks = kzalloc(length, GFP_KERNEL);
5414 if (unlikely(!rsrc_blks)) {
5415 rc = -ENOMEM;
5416 kfree(bmask);
5417 kfree(ids);
5418 goto err_exit;
5419 }
5420 rsrc_blks->rsrc_start = rsrc_id;
5421 rsrc_blks->rsrc_size = rsrc_size;
5422 list_add_tail(&rsrc_blks->list, ext_blk_list);
5423 rsrc_start = rsrc_id;
5424 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5425 phba->sli4_hba.scsi_xri_start = rsrc_start +
5426 lpfc_sli4_get_els_iocb_cnt(phba);
5427
5428 while (rsrc_id < (rsrc_start + rsrc_size)) {
5429 ids[j] = rsrc_id;
5430 rsrc_id++;
5431 j++;
5432 }
5433 /* Entire word processed. Get next word.*/
5434 if ((i % 2) == 1)
5435 k++;
5436 }
5437 err_exit:
5438 lpfc_sli4_mbox_cmd_free(phba, mbox);
5439 return rc;
5440 }
5441
5442 /**
5443 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5444 * @phba: Pointer to HBA context object.
5445 * @type: the extent's type.
5446 *
5447 * This function deallocates all extents of a particular resource type.
5448 * SLI4 does not allow for deallocating a particular extent range. It
5449 * is the caller's responsibility to release all kernel memory resources.
5450 **/
5451 static int
5452 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5453 {
5454 int rc;
5455 uint32_t length, mbox_tmo = 0;
5456 LPFC_MBOXQ_t *mbox;
5457 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5458 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5459
5460 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5461 if (!mbox)
5462 return -ENOMEM;
5463
5464 /*
5465 * This function sends an embedded mailbox because it only sends the
5466 * resource type. All extents of this type are released by the
5467 * port.
5468 */
5469 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5470 sizeof(struct lpfc_sli4_cfg_mhdr));
5471 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5472 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5473 length, LPFC_SLI4_MBX_EMBED);
5474
5475 /* Send an extents count of 0 - the dealloc doesn't use it. */
5476 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5477 LPFC_SLI4_MBX_EMBED);
5478 if (unlikely(rc)) {
5479 rc = -EIO;
5480 goto out_free_mbox;
5481 }
5482 if (!phba->sli4_hba.intr_enable)
5483 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5484 else {
5485 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5486 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5487 }
5488 if (unlikely(rc)) {
5489 rc = -EIO;
5490 goto out_free_mbox;
5491 }
5492
5493 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5494 if (bf_get(lpfc_mbox_hdr_status,
5495 &dealloc_rsrc->header.cfg_shdr.response)) {
5496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5497 "2919 Failed to release resource extents "
5498 "for type %d - Status 0x%x Add'l Status 0x%x. "
5499 "Resource memory not released.\n",
5500 type,
5501 bf_get(lpfc_mbox_hdr_status,
5502 &dealloc_rsrc->header.cfg_shdr.response),
5503 bf_get(lpfc_mbox_hdr_add_status,
5504 &dealloc_rsrc->header.cfg_shdr.response));
5505 rc = -EIO;
5506 goto out_free_mbox;
5507 }
5508
5509 /* Release kernel memory resources for the specific type. */
5510 switch (type) {
5511 case LPFC_RSC_TYPE_FCOE_VPI:
5512 kfree(phba->vpi_bmask);
5513 kfree(phba->vpi_ids);
5514 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5515 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5516 &phba->lpfc_vpi_blk_list, list) {
5517 list_del_init(&rsrc_blk->list);
5518 kfree(rsrc_blk);
5519 }
5520 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5521 break;
5522 case LPFC_RSC_TYPE_FCOE_XRI:
5523 kfree(phba->sli4_hba.xri_bmask);
5524 kfree(phba->sli4_hba.xri_ids);
5525 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5526 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5527 list_del_init(&rsrc_blk->list);
5528 kfree(rsrc_blk);
5529 }
5530 break;
5531 case LPFC_RSC_TYPE_FCOE_VFI:
5532 kfree(phba->sli4_hba.vfi_bmask);
5533 kfree(phba->sli4_hba.vfi_ids);
5534 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5535 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5536 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5537 list_del_init(&rsrc_blk->list);
5538 kfree(rsrc_blk);
5539 }
5540 break;
5541 case LPFC_RSC_TYPE_FCOE_RPI:
5542 /* RPI bitmask and physical id array are cleaned up earlier. */
5543 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5544 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5545 list_del_init(&rsrc_blk->list);
5546 kfree(rsrc_blk);
5547 }
5548 break;
5549 default:
5550 break;
5551 }
5552
5553 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5554
5555 out_free_mbox:
5556 mempool_free(mbox, phba->mbox_mem_pool);
5557 return rc;
5558 }
5559
5560 /**
5561 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5562 * @phba: Pointer to HBA context object.
5563 *
5564 * This function allocates all SLI4 resource identifiers.
5565 **/
5566 int
5567 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5568 {
5569 int i, rc, error = 0;
5570 uint16_t count, base;
5571 unsigned long longs;
5572
5573 if (!phba->sli4_hba.rpi_hdrs_in_use)
5574 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5575 if (phba->sli4_hba.extents_in_use) {
5576 /*
5577 * The port supports resource extents. The XRI, VPI, VFI, RPI
5578 * resource extent count must be read and allocated before
5579 * provisioning the resource id arrays.
5580 */
5581 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5582 LPFC_IDX_RSRC_RDY) {
5583 /*
5584 * Extent-based resources are set - the driver could
5585 * be in a port reset. Figure out if any corrective
5586 * actions need to be taken.
5587 */
5588 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5589 LPFC_RSC_TYPE_FCOE_VFI);
5590 if (rc != 0)
5591 error++;
5592 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5593 LPFC_RSC_TYPE_FCOE_VPI);
5594 if (rc != 0)
5595 error++;
5596 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5597 LPFC_RSC_TYPE_FCOE_XRI);
5598 if (rc != 0)
5599 error++;
5600 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5601 LPFC_RSC_TYPE_FCOE_RPI);
5602 if (rc != 0)
5603 error++;
5604
5605 /*
5606 * It's possible that the number of resources
5607 * provided to this port instance changed between
5608 * resets. Detect this condition and reallocate
5609 * resources. Otherwise, there is no action.
5610 */
5611 if (error) {
5612 lpfc_printf_log(phba, KERN_INFO,
5613 LOG_MBOX | LOG_INIT,
5614 "2931 Detected extent resource "
5615 "change. Reallocating all "
5616 "extents.\n");
5617 rc = lpfc_sli4_dealloc_extent(phba,
5618 LPFC_RSC_TYPE_FCOE_VFI);
5619 rc = lpfc_sli4_dealloc_extent(phba,
5620 LPFC_RSC_TYPE_FCOE_VPI);
5621 rc = lpfc_sli4_dealloc_extent(phba,
5622 LPFC_RSC_TYPE_FCOE_XRI);
5623 rc = lpfc_sli4_dealloc_extent(phba,
5624 LPFC_RSC_TYPE_FCOE_RPI);
5625 } else
5626 return 0;
5627 }
5628
5629 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5630 if (unlikely(rc))
5631 goto err_exit;
5632
5633 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5634 if (unlikely(rc))
5635 goto err_exit;
5636
5637 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5638 if (unlikely(rc))
5639 goto err_exit;
5640
5641 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5642 if (unlikely(rc))
5643 goto err_exit;
5644 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5645 LPFC_IDX_RSRC_RDY);
5646 return rc;
5647 } else {
5648 /*
5649 * The port does not support resource extents. The XRI, VPI,
5650 * VFI, RPI resource ids were determined from READ_CONFIG.
5651 * Just allocate the bitmasks and provision the resource id
5652 * arrays. If a port reset is active, the resources don't
5653 * need any action - just exit.
5654 */
5655 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5656 LPFC_IDX_RSRC_RDY) {
5657 lpfc_sli4_dealloc_resource_identifiers(phba);
5658 lpfc_sli4_remove_rpis(phba);
5659 }
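/*
 * For each resource type (RPI, VPI, XRI, VFI): validate the count
 * reported by READ_CONFIG, allocate an availability bitmask and a
 * physical id array, then fill the array with base + index.
 */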
5660 /* RPIs. */
5661 count = phba->sli4_hba.max_cfg_param.max_rpi;
5662 if (count <= 0) {
5663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5664 "3279 Invalid provisioning of "
5665 "rpi:%d\n", count);
5666 rc = -EINVAL;
5667 goto err_exit;
5668 }
5669 base = phba->sli4_hba.max_cfg_param.rpi_base;
5670 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5671 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5672 sizeof(unsigned long),
5673 GFP_KERNEL);
5674 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5675 rc = -ENOMEM;
5676 goto err_exit;
5677 }
5678 phba->sli4_hba.rpi_ids = kzalloc(count *
5679 sizeof(uint16_t),
5680 GFP_KERNEL);
5681 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5682 rc = -ENOMEM;
5683 goto free_rpi_bmask;
5684 }
5685
5686 for (i = 0; i < count; i++)
5687 phba->sli4_hba.rpi_ids[i] = base + i;
5688
5689 /* VPIs. */
5690 count = phba->sli4_hba.max_cfg_param.max_vpi;
5691 if (count <= 0) {
5692 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5693 "3280 Invalid provisioning of "
5694 "vpi:%d\n", count);
5695 rc = -EINVAL;
5696 goto free_rpi_ids;
5697 }
5698 base = phba->sli4_hba.max_cfg_param.vpi_base;
5699 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5700 phba->vpi_bmask = kzalloc(longs *
5701 sizeof(unsigned long),
5702 GFP_KERNEL);
5703 if (unlikely(!phba->vpi_bmask)) {
5704 rc = -ENOMEM;
5705 goto free_rpi_ids;
5706 }
5707 phba->vpi_ids = kzalloc(count *
5708 sizeof(uint16_t),
5709 GFP_KERNEL);
5710 if (unlikely(!phba->vpi_ids)) {
5711 rc = -ENOMEM;
5712 goto free_vpi_bmask;
5713 }
5714
5715 for (i = 0; i < count; i++)
5716 phba->vpi_ids[i] = base + i;
5717
5718 /* XRIs. */
5719 count = phba->sli4_hba.max_cfg_param.max_xri;
5720 if (count <= 0) {
5721 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5722 "3281 Invalid provisioning of "
5723 "xri:%d\n", count);
5724 rc = -EINVAL;
5725 goto free_vpi_ids;
5726 }
5727 base = phba->sli4_hba.max_cfg_param.xri_base;
5728 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5729 phba->sli4_hba.xri_bmask = kzalloc(longs *
5730 sizeof(unsigned long),
5731 GFP_KERNEL);
5732 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5733 rc = -ENOMEM;
5734 goto free_vpi_ids;
5735 }
5736 phba->sli4_hba.max_cfg_param.xri_used = 0;
5737 phba->sli4_hba.xri_ids = kzalloc(count *
5738 sizeof(uint16_t),
5739 GFP_KERNEL);
5740 if (unlikely(!phba->sli4_hba.xri_ids)) {
5741 rc = -ENOMEM;
5742 goto free_xri_bmask;
5743 }
5744
5745 for (i = 0; i < count; i++)
5746 phba->sli4_hba.xri_ids[i] = base + i;
5747
5748 /* VFIs. */
5749 count = phba->sli4_hba.max_cfg_param.max_vfi;
5750 if (count <= 0) {
5751 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5752 "3282 Invalid provisioning of "
5753 "vfi:%d\n", count);
5754 rc = -EINVAL;
5755 goto free_xri_ids;
5756 }
5757 base = phba->sli4_hba.max_cfg_param.vfi_base;
5758 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5759 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5760 sizeof(unsigned long),
5761 GFP_KERNEL);
5762 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5763 rc = -ENOMEM;
5764 goto free_xri_ids;
5765 }
5766 phba->sli4_hba.vfi_ids = kzalloc(count *
5767 sizeof(uint16_t),
5768 GFP_KERNEL);
5769 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5770 rc = -ENOMEM;
5771 goto free_vfi_bmask;
5772 }
5773
5774 for (i = 0; i < count; i++)
5775 phba->sli4_hba.vfi_ids[i] = base + i;
5776
5777 /*
5778 * Mark all resources ready. An HBA reset doesn't need
5779 * to reset the initialization.
5780 */
5781 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5782 LPFC_IDX_RSRC_RDY);
5783 return 0;
5784 }
5785
5786 free_vfi_bmask:
5787 kfree(phba->sli4_hba.vfi_bmask);
5788 free_xri_ids:
5789 kfree(phba->sli4_hba.xri_ids);
5790 free_xri_bmask:
5791 kfree(phba->sli4_hba.xri_bmask);
5792 free_vpi_ids:
5793 kfree(phba->vpi_ids);
5794 free_vpi_bmask:
5795 kfree(phba->vpi_bmask);
5796 free_rpi_ids:
5797 kfree(phba->sli4_hba.rpi_ids);
5798 free_rpi_bmask:
5799 kfree(phba->sli4_hba.rpi_bmask);
5800 err_exit:
5801 return rc;
5802 }
5803
5804 /**
5805 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
5806 * @phba: Pointer to HBA context object.
5807 *
5808 * This function deallocates all SLI4 resource identifiers (rpi, vpi,
5809 * xri, vfi) previously allocated for the port.
5810 **/
5811 int
5812 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5813 {
5814 if (phba->sli4_hba.extents_in_use) {
5815 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5816 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5817 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5818 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5819 } else {
5820 kfree(phba->vpi_bmask);
5821 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5822 kfree(phba->vpi_ids);
5823 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5824 kfree(phba->sli4_hba.xri_bmask);
5825 kfree(phba->sli4_hba.xri_ids);
5826 kfree(phba->sli4_hba.vfi_bmask);
5827 kfree(phba->sli4_hba.vfi_ids);
5828 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5829 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5830 }
5831
5832 return 0;
5833 }
5834
5835 /**
5836 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5837 * @phba: Pointer to HBA context object.
5838 * @type: The resource extent type.
5839 * @extnt_cnt: buffer to hold port extent count response.
5840 * @extnt_size: buffer to hold port extent size response.
5841 *
5842 * This function calls the port to read the host allocated extents
5843 * for a particular type.
5844 **/
5845 int
5846 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5847 uint16_t *extnt_cnt, uint16_t *extnt_size)
5848 {
5849 bool emb;
5850 int rc = 0;
5851 uint16_t curr_blks = 0;
5852 uint32_t req_len, emb_len;
5853 uint32_t alloc_len, mbox_tmo;
5854 struct list_head *blk_list_head;
5855 struct lpfc_rsrc_blks *rsrc_blk;
5856 LPFC_MBOXQ_t *mbox;
5857 void *virtaddr = NULL;
5858 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5859 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5860 union lpfc_sli4_cfg_shdr *shdr;
5861
5862 switch (type) {
5863 case LPFC_RSC_TYPE_FCOE_VPI:
5864 blk_list_head = &phba->lpfc_vpi_blk_list;
5865 break;
5866 case LPFC_RSC_TYPE_FCOE_XRI:
5867 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5868 break;
5869 case LPFC_RSC_TYPE_FCOE_VFI:
5870 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5871 break;
5872 case LPFC_RSC_TYPE_FCOE_RPI:
5873 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5874 break;
5875 default:
5876 return -EIO;
5877 }
5878
5879 /* Count the number of extents currently allocated for this type. */
5880 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5881 if (curr_blks == 0) {
5882 /*
5883 * The GET_ALLOCATED mailbox does not return the size,
5884 * just the count. The size should be just the size
5885 * stored in the current allocated block and all sizes
5886 * for an extent type are the same so set the return
5887 * value now.
5888 */
5889 *extnt_size = rsrc_blk->rsrc_size;
5890 }
5891 curr_blks++;
5892 }
5893
5894 /* Calculate the total requested length of the dma memory. */
5895 req_len = curr_blks * sizeof(uint16_t);
5896
5897 /*
5898 * Calculate the size of an embedded mailbox. The uint32_t
5899 * accounts for the extents-specific word.
5900 */
5901 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5902 sizeof(uint32_t);
5903
5904 /*
5905 * Presume the allocation and response will fit into an embedded
5906 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5907 */
5908 emb = LPFC_SLI4_MBX_EMBED;
5909 req_len = emb_len;
5910 if (req_len > emb_len) {
5911 req_len = curr_blks * sizeof(uint16_t) +
5912 sizeof(union lpfc_sli4_cfg_shdr) +
5913 sizeof(uint32_t);
5914 emb = LPFC_SLI4_MBX_NEMBED;
5915 }
5916
5917 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5918 if (!mbox)
5919 return -ENOMEM;
5920 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5921
5922 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5923 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5924 req_len, emb);
5925 if (alloc_len < req_len) {
5926 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5927 "2983 Allocated DMA memory size (x%x) is "
5928 "less than the requested DMA memory "
5929 "size (x%x)\n", alloc_len, req_len);
5930 rc = -ENOMEM;
5931 goto err_exit;
5932 }
5933 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5934 if (unlikely(rc)) {
5935 rc = -EIO;
5936 goto err_exit;
5937 }
5938
5939 if (!phba->sli4_hba.intr_enable)
5940 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5941 else {
5942 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5943 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5944 }
5945
5946 if (unlikely(rc)) {
5947 rc = -EIO;
5948 goto err_exit;
5949 }
5950
5951 /*
5952 * Figure out where the response is located. Then get local pointers
5953 * to the response data. The port does not guarantee to honor the full
5954 * requested extent count, so update the local variable with the count
5955 * actually allocated by the port.
5956 */
5957 if (emb == LPFC_SLI4_MBX_EMBED) {
5958 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5959 shdr = &rsrc_ext->header.cfg_shdr;
5960 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5961 } else {
5962 virtaddr = mbox->sge_array->addr[0];
5963 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5964 shdr = &n_rsrc->cfg_shdr;
5965 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5966 }
5967
5968 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5969 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5970 "2984 Failed to read allocated resources "
5971 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5972 type,
5973 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5974 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5975 rc = -EIO;
5976 goto err_exit;
5977 }
5978 err_exit:
5979 lpfc_sli4_mbox_cmd_free(phba, mbox);
5980 return rc;
5981 }
5982
5983 /**
5984 * lpfc_sli4_repost_els_sgl_list - Repost the els buffer sgl pages as a block
5985 * @phba: pointer to lpfc hba data structure.
5986 *
5987 * This routine walks the list of els buffers that have been allocated and
5988 * reposts them to the port by using SGL block post. This is needed after a
5989 * pci_function_reset/warm_start or start. It attempts to construct blocks
5990 * of els buffer sgls which contain contiguous xris and uses the non-embedded
5991 * SGL block post mailbox commands to post them to the port. For a single els
5992 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
5993 * mailbox command for posting.
5994 *
5995 * Returns: 0 = success, non-zero failure.
5996 **/
5997 static int
5998 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
5999 {
6000 struct lpfc_sglq *sglq_entry = NULL;
6001 struct lpfc_sglq *sglq_entry_next = NULL;
6002 struct lpfc_sglq *sglq_entry_first = NULL;
6003 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
6004 int last_xritag = NO_XRI;
6005 LIST_HEAD(prep_sgl_list);
6006 LIST_HEAD(blck_sgl_list);
6007 LIST_HEAD(allc_sgl_list);
6008 LIST_HEAD(post_sgl_list);
6009 LIST_HEAD(free_sgl_list);
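/*
 * Staging lists: allc holds every sgl pulled off the driver list, prep
 * accumulates sgls for the current contiguous-xri block, blck holds a
 * completed block ready to post, post collects sgls that posted
 * successfully, and free collects those that failed.
 */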
6010
6011 spin_lock_irq(&phba->hbalock);
6012 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6013 spin_unlock_irq(&phba->hbalock);
6014
6015 total_cnt = phba->sli4_hba.els_xri_cnt;
6016 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6017 &allc_sgl_list, list) {
6018 list_del_init(&sglq_entry->list);
6019 block_cnt++;
6020 if ((last_xritag != NO_XRI) &&
6021 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6022 /* a hole in xri block, form a sgl posting block */
6023 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6024 post_cnt = block_cnt - 1;
6025 /* prepare list for next posting block */
6026 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6027 block_cnt = 1;
6028 } else {
6029 /* prepare list for next posting block */
6030 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6031 /* enough sgls for non-embed sgl mbox command */
6032 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6033 list_splice_init(&prep_sgl_list,
6034 &blck_sgl_list);
6035 post_cnt = block_cnt;
6036 block_cnt = 0;
6037 }
6038 }
6039 num_posted++;
6040
6041 /* keep track of last sgl's xritag */
6042 last_xritag = sglq_entry->sli4_xritag;
6043
6044 /* end of repost sgl list condition for els buffers */
6045 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6046 if (post_cnt == 0) {
6047 list_splice_init(&prep_sgl_list,
6048 &blck_sgl_list);
6049 post_cnt = block_cnt;
6050 } else if (block_cnt == 1) {
6051 status = lpfc_sli4_post_sgl(phba,
6052 sglq_entry->phys, 0,
6053 sglq_entry->sli4_xritag);
6054 if (!status) {
6055 /* successful, put sgl to posted list */
6056 list_add_tail(&sglq_entry->list,
6057 &post_sgl_list);
6058 } else {
6059 /* Failure, put sgl to free list */
6060 lpfc_printf_log(phba, KERN_WARNING,
6061 LOG_SLI,
6062 "3159 Failed to post els "
6063 "sgl, xritag:x%x\n",
6064 sglq_entry->sli4_xritag);
6065 list_add_tail(&sglq_entry->list,
6066 &free_sgl_list);
6067 total_cnt--;
6068 }
6069 }
6070 }
6071
6072 /* continue until a nembed page worth of sgls */
6073 if (post_cnt == 0)
6074 continue;
6075
6076 /* post the els buffer list sgls as a block */
6077 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6078 post_cnt);
6079
6080 if (!status) {
6081 /* success, put sgl list to posted sgl list */
6082 list_splice_init(&blck_sgl_list, &post_sgl_list);
6083 } else {
6084 /* Failure, put sgl list to free sgl list */
6085 sglq_entry_first = list_first_entry(&blck_sgl_list,
6086 struct lpfc_sglq,
6087 list);
6088 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6089 "3160 Failed to post els sgl-list, "
6090 "xritag:x%x-x%x\n",
6091 sglq_entry_first->sli4_xritag,
6092 (sglq_entry_first->sli4_xritag +
6093 post_cnt - 1));
6094 list_splice_init(&blck_sgl_list, &free_sgl_list);
6095 total_cnt -= post_cnt;
6096 }
6097
6098 /* don't reset xritag due to hole in xri block */
6099 if (block_cnt == 0)
6100 last_xritag = NO_XRI;
6101
6102 /* reset els sgl post count for next round of posting */
6103 post_cnt = 0;
6104 }
6105 /* update the number of XRIs posted for ELS */
6106 phba->sli4_hba.els_xri_cnt = total_cnt;
6107
6108 /* free the els sgls failed to post */
6109 lpfc_free_sgl_list(phba, &free_sgl_list);
6110
6111 /* push els sgls posted to the available list */
6112 if (!list_empty(&post_sgl_list)) {
6113 spin_lock_irq(&phba->hbalock);
6114 list_splice_init(&post_sgl_list,
6115 &phba->sli4_hba.lpfc_sgl_list);
6116 spin_unlock_irq(&phba->hbalock);
6117 } else {
6118 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6119 "3161 Failure to post els sgl to port.\n");
6120 return -EIO;
6121 }
6122 return 0;
6123 }
6124
6125 /**
6126 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6127 * @phba: Pointer to HBA context object.
6128 *
6129 * This function is the main SLI4 device initialization PCI function. This
6130 * function is called by the HBA initialization code, HBA reset code and
6131 * HBA error attention handler code. Caller is not required to hold any
6132 * locks.
6133 **/
6134 int
6135 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6136 {
6137 int rc;
6138 LPFC_MBOXQ_t *mboxq;
6139 struct lpfc_mqe *mqe;
6140 uint8_t *vpd;
6141 uint32_t vpd_size;
6142 uint32_t ftr_rsp = 0;
6143 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6144 struct lpfc_vport *vport = phba->pport;
6145 struct lpfc_dmabuf *mp;
6146
6147 /* Perform a PCI function reset to start from clean */
6148 rc = lpfc_pci_function_reset(phba);
6149 if (unlikely(rc))
6150 return -ENODEV;
6151
6152 /* Check the HBA Host Status Register for readiness */
6153 rc = lpfc_sli4_post_status_check(phba);
6154 if (unlikely(rc))
6155 return -ENODEV;
6156 else {
6157 spin_lock_irq(&phba->hbalock);
6158 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6159 spin_unlock_irq(&phba->hbalock);
6160 }
6161
6162 /*
6163 * Allocate a single mailbox container for initializing the
6164 * port.
6165 */
6166 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6167 if (!mboxq)
6168 return -ENOMEM;
6169
6170 /* Issue READ_REV to collect vpd and FW information. */
6171 vpd_size = SLI4_PAGE_SIZE;
6172 vpd = kzalloc(vpd_size, GFP_KERNEL);
6173 if (!vpd) {
6174 rc = -ENOMEM;
6175 goto out_free_mbox;
6176 }
6177
6178 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6179 if (unlikely(rc)) {
6180 kfree(vpd);
6181 goto out_free_mbox;
6182 }
6183 mqe = &mboxq->u.mqe;
6184 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6185 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
6186 phba->hba_flag |= HBA_FCOE_MODE;
6187 else
6188 phba->hba_flag &= ~HBA_FCOE_MODE;
6189
6190 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6191 LPFC_DCBX_CEE_MODE)
6192 phba->hba_flag |= HBA_FIP_SUPPORT;
6193 else
6194 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6195
6196 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6197
6198 if (phba->sli_rev != LPFC_SLI_REV4) {
6199 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6200 "0376 READ_REV Error. SLI Level %d "
6201 "FCoE enabled %d\n",
6202 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6203 rc = -EIO;
6204 kfree(vpd);
6205 goto out_free_mbox;
6206 }
6207
6208 /*
6209 * Continue initialization with default values even if the driver failed
6210 * to read the FCoE param config regions; only read the parameters if the
6211 * board is FCoE.
6212 */
6213 if (phba->hba_flag & HBA_FCOE_MODE &&
6214 lpfc_sli4_read_fcoe_params(phba))
6215 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6216 "2570 Failed to read FCoE parameters\n");
6217
6218 /*
6219 * Retrieve the sli4 device physical port name; failure to do so
6220 * is considered non-fatal.
6221 */
6222 rc = lpfc_sli4_retrieve_pport_name(phba);
6223 if (!rc)
6224 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6225 "3080 Successful retrieving SLI4 device "
6226 "physical port name: %s.\n", phba->Port);
6227
6228 /*
6229 * Evaluate the read rev and vpd data. Populate the driver
6230 * state with the results. If this routine fails, the failure
6231 * is not fatal as the driver will use generic values.
6232 */
6233 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6234 if (unlikely(!rc)) {
6235 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6236 "0377 Error %d parsing vpd. "
6237 "Using defaults.\n", rc);
6238 rc = 0;
6239 }
6240 kfree(vpd);
6241
6242 /* Save information as VPD data */
6243 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6244 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6245 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6246 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6247 &mqe->un.read_rev);
6248 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6249 &mqe->un.read_rev);
6250 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6251 &mqe->un.read_rev);
6252 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6253 &mqe->un.read_rev);
6254 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6255 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6256 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6257 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6258 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6259 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6260 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6261 "(%d):0380 READ_REV Status x%x "
6262 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6263 mboxq->vport ? mboxq->vport->vpi : 0,
6264 bf_get(lpfc_mqe_status, mqe),
6265 phba->vpd.rev.opFwName,
6266 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6267 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6268
6269 /*
6270 * Discover the port's supported feature set and match it against the
6271 * host's requests.
6272 */
6273 lpfc_request_features(phba, mboxq);
6274 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6275 if (unlikely(rc)) {
6276 rc = -EIO;
6277 goto out_free_mbox;
6278 }
6279
6280 /*
6281 * The port must support FCP initiator mode as this is the
6282 * only mode running in the host.
6283 */
6284 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6285 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6286 "0378 No support for fcpi mode.\n");
6287 ftr_rsp++;
6288 }
6289 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6290 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6291 else
6292 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6293 /*
6294 * If the port cannot support the host's requested features
6295 * then turn off the global config parameters to disable the
6296 * feature in the driver. This is not a fatal error.
6297 */
6298 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6299 if (phba->cfg_enable_bg) {
6300 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6301 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6302 else
6303 ftr_rsp++;
6304 }
6305
6306 if (phba->max_vpi && phba->cfg_enable_npiv &&
6307 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6308 ftr_rsp++;
6309
6310 if (ftr_rsp) {
6311 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6312 "0379 Feature Mismatch Data: x%08x %08x "
6313 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6314 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6315 phba->cfg_enable_npiv, phba->max_vpi);
6316 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6317 phba->cfg_enable_bg = 0;
6318 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6319 phba->cfg_enable_npiv = 0;
6320 }
6321
6322 /* These SLI3 features are assumed in SLI4 */
6323 spin_lock_irq(&phba->hbalock);
6324 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6325 spin_unlock_irq(&phba->hbalock);
6326
6327 /*
6328 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6329 * calls depend on these resources to complete port setup.
6330 */
6331 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6332 if (rc) {
6333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6334 "2920 Failed to alloc Resource IDs "
6335 "rc = x%x\n", rc);
6336 goto out_free_mbox;
6337 }
6338
6339 /* Read the port's service parameters. */
6340 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6341 if (rc) {
6342 phba->link_state = LPFC_HBA_ERROR;
6343 rc = -ENOMEM;
6344 goto out_free_mbox;
6345 }
6346
6347 mboxq->vport = vport;
6348 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6349 mp = (struct lpfc_dmabuf *) mboxq->context1;
6350 if (rc == MBX_SUCCESS) {
6351 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6352 rc = 0;
6353 }
6354
6355 /*
6356 * This memory was allocated by the lpfc_read_sparam routine. Release
6357 * it to the mbuf pool.
6358 */
6359 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6360 kfree(mp);
6361 mboxq->context1 = NULL;
6362 if (unlikely(rc)) {
6363 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6364 "0382 READ_SPARAM command failed "
6365 "status %d, mbxStatus x%x\n",
6366 rc, bf_get(lpfc_mqe_status, mqe));
6367 phba->link_state = LPFC_HBA_ERROR;
6368 rc = -EIO;
6369 goto out_free_mbox;
6370 }
6371
6372 lpfc_update_vport_wwn(vport);
6373
6374 /* Update the fc_host data structures with new wwn. */
6375 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6376 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6377
6378 /* update host els and scsi xri-sgl sizes and mappings */
6379 rc = lpfc_sli4_xri_sgl_update(phba);
6380 if (unlikely(rc)) {
6381 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6382 "1400 Failed to update xri-sgl size and "
6383 "mapping: %d\n", rc);
6384 goto out_free_mbox;
6385 }
6386
6387 /* register the els sgl pool to the port */
6388 rc = lpfc_sli4_repost_els_sgl_list(phba);
6389 if (unlikely(rc)) {
6390 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6391 "0582 Error %d during els sgl post "
6392 "operation\n", rc);
6393 rc = -ENODEV;
6394 goto out_free_mbox;
6395 }
6396
6397 /* register the allocated scsi sgl pool to the port */
6398 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6399 if (unlikely(rc)) {
6400 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6401 "0383 Error %d during scsi sgl post "
6402 "operation\n", rc);
6403 /* Some Scsi buffers were moved to the abort scsi list */
6404 /* A pci function reset will repost them */
6405 rc = -ENODEV;
6406 goto out_free_mbox;
6407 }
6408
6409 /* Post the rpi header region to the device. */
6410 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6411 if (unlikely(rc)) {
6412 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6413 "0393 Error %d during rpi post operation\n",
6414 rc);
6415 rc = -ENODEV;
6416 goto out_free_mbox;
6417 }
6418 lpfc_sli4_node_prep(phba);
6419
6420 /* Create all the SLI4 queues */
6421 rc = lpfc_sli4_queue_create(phba);
6422 if (rc) {
6423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6424 "3089 Failed to allocate queues\n");
6425 rc = -ENODEV;
6426 goto out_stop_timers;
6427 }
6428 /* Set up all the queues to the device */
6429 rc = lpfc_sli4_queue_setup(phba);
6430 if (unlikely(rc)) {
6431 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6432 "0381 Error %d during queue setup.\n ", rc);
6433 goto out_destroy_queue;
6434 }
6435
6436 /* Arm the CQs and then EQs on device */
6437 lpfc_sli4_arm_cqeq_intr(phba);
6438
6439 /* Indicate device interrupt mode */
6440 phba->sli4_hba.intr_enable = 1;
6441
6442 /* Allow asynchronous mailbox command to go through */
6443 spin_lock_irq(&phba->hbalock);
6444 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6445 spin_unlock_irq(&phba->hbalock);
6446
6447 /* Post receive buffers to the device */
6448 lpfc_sli4_rb_setup(phba);
6449
6450 /* Reset HBA FCF states after HBA reset */
6451 phba->fcf.fcf_flag = 0;
6452 phba->fcf.current_rec.flag = 0;
6453
6454 /* Start the ELS watchdog timer */
6455 mod_timer(&vport->els_tmofunc,
6456 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6457
6458 /* Start heart beat timer */
6459 mod_timer(&phba->hb_tmofunc,
6460 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6461 phba->hb_outstanding = 0;
6462 phba->last_completion_time = jiffies;
6463
6464 /* Start error attention (ERATT) polling timer */
6465 mod_timer(&phba->eratt_poll,
6466 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
6467
6468 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6469 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6470 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6471 if (!rc) {
6472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6473 "2829 This device supports "
6474 "Advanced Error Reporting (AER)\n");
6475 spin_lock_irq(&phba->hbalock);
6476 phba->hba_flag |= HBA_AER_ENABLED;
6477 spin_unlock_irq(&phba->hbalock);
6478 } else {
6479 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6480 "2830 This device does not support "
6481 "Advanced Error Reporting (AER)\n");
6482 phba->cfg_aer_support = 0;
6483 }
6484 rc = 0;
6485 }
6486
6487 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6488 /*
6489 * The FC Port needs to register FCFI (index 0)
6490 */
6491 lpfc_reg_fcfi(phba, mboxq);
6492 mboxq->vport = phba->pport;
6493 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6494 if (rc != MBX_SUCCESS)
6495 goto out_unset_queue;
6496 rc = 0;
6497 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6498 &mboxq->u.mqe.un.reg_fcfi);
6499
6500 /* Check if the port is configured to be disabled */
6501 lpfc_sli_read_link_ste(phba);
6502 }
6503
6504 /*
6505 * The port is ready, set the host's link state to LINK_DOWN
6506 * in preparation for link interrupts.
6507 */
6508 spin_lock_irq(&phba->hbalock);
6509 phba->link_state = LPFC_LINK_DOWN;
6510 spin_unlock_irq(&phba->hbalock);
6511 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6512 (phba->hba_flag & LINK_DISABLED)) {
6513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6514 "3103 Adapter Link is disabled.\n");
6515 lpfc_down_link(phba, mboxq);
6516 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6517 if (rc != MBX_SUCCESS) {
6518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6519 "3104 Adapter failed to issue "
6520 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6521 goto out_unset_queue;
6522 }
6523 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6524 /* don't perform init_link on SLI4 FC port loopback test */
6525 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6526 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6527 if (rc)
6528 goto out_unset_queue;
6529 }
6530 }
6531 mempool_free(mboxq, phba->mbox_mem_pool);
6532 return rc;
6533 out_unset_queue:
6534 /* Unset all the queues set up in this routine when error out */
6535 lpfc_sli4_queue_unset(phba);
6536 out_destroy_queue:
6537 lpfc_sli4_queue_destroy(phba);
6538 out_stop_timers:
6539 lpfc_stop_hba_timers(phba);
6540 out_free_mbox:
6541 mempool_free(mboxq, phba->mbox_mem_pool);
6542 return rc;
6543 }
6544
6545 /**
6546 * lpfc_mbox_timeout - Timeout call back function for mbox timer
6547 * @ptr: context object - pointer to hba structure.
6548 *
6549 * This is the callback function for mailbox timer. The mailbox
6550 * timer is armed when a new mailbox command is issued and the timer
6551 * is deleted when the mailbox completes. The function is called by
6552 * the kernel timer code when a mailbox does not complete within
6553 * expected time. This function wakes up the worker thread to
6554 * process the mailbox timeout and returns. All the processing is
6555 * done by the worker thread function lpfc_mbox_timeout_handler.
6556 **/
6557 void
6558 lpfc_mbox_timeout(unsigned long ptr)
6559 {
6560 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6561 unsigned long iflag;
6562 uint32_t tmo_posted;
6563
6564 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6565 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6566 if (!tmo_posted)
6567 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6568 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6569
6570 if (!tmo_posted)
6571 lpfc_worker_wake_up(phba);
6572 return;
6573 }
6574
6575
6576 /**
6577 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
6578 * @phba: Pointer to HBA context object.
6579 *
6580 * This function is called from worker thread when a mailbox command times out.
6581 * The caller is not required to hold any locks. This function will reset the
6582 * HBA and recover all the pending commands.
6583 **/
6584 void
6585 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6586 {
6587 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
6588 MAILBOX_t *mb = &pmbox->u.mb;
6589 struct lpfc_sli *psli = &phba->sli;
6590 struct lpfc_sli_ring *pring;
6591
6592 /* Check the pmbox pointer first. There is a race condition
6593 * between the mbox timeout handler getting executed in the
6594 * worklist and the mailbox actually completing. When this
6595 * race condition occurs, the mbox_active will be NULL.
6596 */
6597 spin_lock_irq(&phba->hbalock);
6598 if (pmbox == NULL) {
6599 lpfc_printf_log(phba, KERN_WARNING,
6600 LOG_MBOX | LOG_SLI,
6601 "0353 Active Mailbox cleared - mailbox timeout "
6602 "exiting\n");
6603 spin_unlock_irq(&phba->hbalock);
6604 return;
6605 }
6606
6607 /* Mbox cmd <mbxCommand> timeout */
6608 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6609 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
6610 mb->mbxCommand,
6611 phba->pport->port_state,
6612 phba->sli.sli_flag,
6613 phba->sli.mbox_active);
6614 spin_unlock_irq(&phba->hbalock);
6615
6616 /* Setting state unknown so lpfc_sli_abort_iocb_ring
6617 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
6618 * it to fail all outstanding SCSI IO.
6619 */
6620 spin_lock_irq(&phba->pport->work_port_lock);
6621 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6622 spin_unlock_irq(&phba->pport->work_port_lock);
6623 spin_lock_irq(&phba->hbalock);
6624 phba->link_state = LPFC_LINK_UNKNOWN;
6625 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
6626 spin_unlock_irq(&phba->hbalock);
6627
6628 pring = &psli->ring[psli->fcp_ring];
6629 lpfc_sli_abort_iocb_ring(phba, pring);
6630
6631 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6632 "0345 Resetting board due to mailbox timeout\n");
6633
6634 /* Reset the HBA device */
6635 lpfc_reset_hba(phba);
6636 }
6637
6638 /**
6639 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
6640 * @phba: Pointer to HBA context object.
6641 * @pmbox: Pointer to mailbox object.
6642 * @flag: Flag indicating how the mailbox needs to be processed.
6643 *
6644 * This function is called by discovery code and HBA management code
6645 * to submit a mailbox command to firmware with SLI-3 interface spec. This
6646 * function gets the hbalock to protect the data structures.
6647 * The mailbox command can be submitted in polling mode, in which case
6648 * this function will wait in a polling loop for the completion of the
6649 * mailbox.
6650 * If the mailbox is submitted in no_wait mode (not polling) the
6651 * function will submit the command and returns immediately without waiting
6652 * for the mailbox completion. The no_wait is supported only when HBA
6653 * is in SLI2/SLI3 mode - interrupts are enabled.
6654 * The SLI interface allows only one mailbox pending at a time. If the
6655 * mailbox is issued in polling mode and there is already a mailbox
6656 * pending, then the function will return an error. If the mailbox is issued
6657 * in NO_WAIT mode and there is a mailbox pending already, the function
6658 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6659 * The sli layer owns the mailbox object until the completion of mailbox
6660 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
6661 * return codes the caller owns the mailbox command after the return of
6662 * the function.
6663 **/
6664 static int
6665 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6666 uint32_t flag)
6667 {
6668 MAILBOX_t *mbx;
6669 struct lpfc_sli *psli = &phba->sli;
6670 uint32_t status, evtctr;
6671 uint32_t ha_copy, hc_copy;
6672 int i;
6673 unsigned long timeout;
6674 unsigned long drvr_flag = 0;
6675 uint32_t word0, ldata;
6676 void __iomem *to_slim;
6677 int processing_queue = 0;
6678
6679 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6680 if (!pmbox) {
6681 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6682 /* processing mbox queue from intr_handler */
6683 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6684 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6685 return MBX_SUCCESS;
6686 }
6687 processing_queue = 1;
6688 pmbox = lpfc_mbox_get(phba);
6689 if (!pmbox) {
6690 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6691 return MBX_SUCCESS;
6692 }
6693 }
6694
6695 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
6696 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
6697 if (!pmbox->vport) {
6698 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6699 lpfc_printf_log(phba, KERN_ERR,
6700 LOG_MBOX | LOG_VPORT,
6701 "1806 Mbox x%x failed. No vport\n",
6702 pmbox->u.mb.mbxCommand);
6703 dump_stack();
6704 goto out_not_finished;
6705 }
6706 }
6707
6708 /* If the PCI channel is in offline state, do not post mbox. */
6709 if (unlikely(pci_channel_offline(phba->pcidev))) {
6710 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6711 goto out_not_finished;
6712 }
6713
6714 /* If HBA has a deferred error attention, fail the mailbox command. */
6715 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6716 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6717 goto out_not_finished;
6718 }
6719
6720 psli = &phba->sli;
6721
6722 mbx = &pmbox->u.mb;
6723 status = MBX_SUCCESS;
6724
6725 if (phba->link_state == LPFC_HBA_ERROR) {
6726 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6727
6728 /* Mbox command <mbxCommand> cannot issue */
6729 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6730 "(%d):0311 Mailbox command x%x cannot "
6731 "issue Data: x%x x%x\n",
6732 pmbox->vport ? pmbox->vport->vpi : 0,
6733 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6734 goto out_not_finished;
6735 }
6736
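	/*
	 * For a no-wait (asynchronous) mailbox, completion is signaled by a
	 * mailbox attention interrupt, so the mailbox interrupt enable bit
	 * must be set in the Host Control register before issuing.
	 */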
6737 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6738 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6739 !(hc_copy & HC_MBINT_ENA)) {
6740 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6741 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6742 "(%d):2528 Mailbox command x%x cannot "
6743 "issue Data: x%x x%x\n",
6744 pmbox->vport ? pmbox->vport->vpi : 0,
6745 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6746 goto out_not_finished;
6747 }
6748 }
6749
6750 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6751 /* Polling for a mbox command when another one is already active
6752 * is not allowed in SLI. Also, the driver must have established
6753 * SLI2 mode to queue and process multiple mbox commands.
6754 */
6755
6756 if (flag & MBX_POLL) {
6757 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6758
6759 /* Mbox command <mbxCommand> cannot issue */
6760 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6761 "(%d):2529 Mailbox command x%x "
6762 "cannot issue Data: x%x x%x\n",
6763 pmbox->vport ? pmbox->vport->vpi : 0,
6764 pmbox->u.mb.mbxCommand,
6765 psli->sli_flag, flag);
6766 goto out_not_finished;
6767 }
6768
6769 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
6770 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6771 /* Mbox command <mbxCommand> cannot issue */
6772 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6773 "(%d):2530 Mailbox command x%x "
6774 "cannot issue Data: x%x x%x\n",
6775 pmbox->vport ? pmbox->vport->vpi : 0,
6776 pmbox->u.mb.mbxCommand,
6777 psli->sli_flag, flag);
6778 goto out_not_finished;
6779 }
6780
6781 /* Another mailbox command is still being processed, queue this
6782 * command to be processed later.
6783 */
6784 lpfc_mbox_put(phba, pmbox);
6785
6786 /* Mbox cmd issue - BUSY */
6787 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6788 "(%d):0308 Mbox cmd issue - BUSY Data: "
6789 "x%x x%x x%x x%x\n",
6790 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6791 mbx->mbxCommand, phba->pport->port_state,
6792 psli->sli_flag, flag);
6793
6794 psli->slistat.mbox_busy++;
6795 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6796
6797 if (pmbox->vport) {
6798 lpfc_debugfs_disc_trc(pmbox->vport,
6799 LPFC_DISC_TRC_MBOX_VPORT,
6800 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
6801 (uint32_t)mbx->mbxCommand,
6802 mbx->un.varWords[0], mbx->un.varWords[1]);
6803 }
6804 else {
6805 lpfc_debugfs_disc_trc(phba->pport,
6806 LPFC_DISC_TRC_MBOX,
6807 "MBOX Bsy: cmd:x%x mb:x%x x%x",
6808 (uint32_t)mbx->mbxCommand,
6809 mbx->un.varWords[0], mbx->un.varWords[1]);
6810 }
6811
6812 return MBX_BUSY;
6813 }
6814
6815 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6816
6817 /* If we are not polling, we MUST be in SLI2 mode */
6818 if (flag != MBX_POLL) {
6819 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
6820 (mbx->mbxCommand != MBX_KILL_BOARD)) {
6821 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6822 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6823 /* Mbox command <mbxCommand> cannot issue */
6824 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6825 "(%d):2531 Mailbox command x%x "
6826 "cannot issue Data: x%x x%x\n",
6827 pmbox->vport ? pmbox->vport->vpi : 0,
6828 pmbox->u.mb.mbxCommand,
6829 psli->sli_flag, flag);
6830 goto out_not_finished;
6831 }
6832 /* timeout active mbox command */
6833 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6834 1000);
6835 mod_timer(&psli->mbox_tmo, jiffies + timeout);
6836 }
6837
6838 /* Mailbox cmd <cmd> issue */
6839 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6840 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
6841 "x%x\n",
6842 pmbox->vport ? pmbox->vport->vpi : 0,
6843 mbx->mbxCommand, phba->pport->port_state,
6844 psli->sli_flag, flag);
6845
6846 if (mbx->mbxCommand != MBX_HEARTBEAT) {
6847 if (pmbox->vport) {
6848 lpfc_debugfs_disc_trc(pmbox->vport,
6849 LPFC_DISC_TRC_MBOX_VPORT,
6850 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6851 (uint32_t)mbx->mbxCommand,
6852 mbx->un.varWords[0], mbx->un.varWords[1]);
6853 }
6854 else {
6855 lpfc_debugfs_disc_trc(phba->pport,
6856 LPFC_DISC_TRC_MBOX,
6857 "MBOX Send: cmd:x%x mb:x%x x%x",
6858 (uint32_t)mbx->mbxCommand,
6859 mbx->un.varWords[0], mbx->un.varWords[1]);
6860 }
6861 }
6862
6863 psli->slistat.mbox_cmd++;
6864 evtctr = psli->slistat.mbox_event;
6865
6866 /* next set own bit for the adapter and copy over command word */
6867 mbx->mbxOwner = OWN_CHIP;
6868
6869 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6870 /* Populate mbox extension offset word. */
6871 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6872 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6873 = (uint8_t *)phba->mbox_ext
6874 - (uint8_t *)phba->mbox;
6875 }
6876
6877 /* Copy the mailbox extension data */
6878 if (pmbox->in_ext_byte_len && pmbox->context2) {
6879 lpfc_sli_pcimem_bcopy(pmbox->context2,
6880 (uint8_t *)phba->mbox_ext,
6881 pmbox->in_ext_byte_len);
6882 }
6883 /* Copy command data to host SLIM area */
6884 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
6885 } else {
6886 /* Populate mbox extension offset word. */
6887 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6888 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
6889 = MAILBOX_HBA_EXT_OFFSET;
6890
6891 /* Copy the mailbox extension data */
6892 if (pmbox->in_ext_byte_len && pmbox->context2) {
6893 lpfc_memcpy_to_slim(phba->MBslimaddr +
6894 MAILBOX_HBA_EXT_OFFSET,
6895 pmbox->context2, pmbox->in_ext_byte_len);
6896
6897 }
6898 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6899 /* copy command data into host mbox for cmpl */
6900 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
6901 }
6902
6903 /* First copy mbox command data to HBA SLIM, skip past first
6904 word */
6905 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6906 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
6907 MAILBOX_CMD_SIZE - sizeof (uint32_t));
6908
6909 /* Next copy over first word, with mbxOwner set */
6910 ldata = *((uint32_t *)mbx);
6911 to_slim = phba->MBslimaddr;
6912 writel(ldata, to_slim);
6913 readl(to_slim); /* flush */
6914
6915 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6916 /* switch over to host mailbox */
6917 psli->sli_flag |= LPFC_SLI_ACTIVE;
6918 }
6919 }
6920
6921 wmb();
6922
6923 switch (flag) {
6924 case MBX_NOWAIT:
6925 /* Set up reference to mailbox command */
6926 psli->mbox_active = pmbox;
6927 /* Interrupt board to do it */
6928 writel(CA_MBATT, phba->CAregaddr);
6929 readl(phba->CAregaddr); /* flush */
6930 /* Don't wait for it to finish, just return */
6931 break;
6932
6933 case MBX_POLL:
6934 /* Set up null reference to mailbox command */
6935 psli->mbox_active = NULL;
6936 /* Interrupt board to do it */
6937 writel(CA_MBATT, phba->CAregaddr);
6938 readl(phba->CAregaddr); /* flush */
6939
6940 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6941 /* First read mbox status word */
6942 word0 = *((uint32_t *)phba->mbox);
6943 word0 = le32_to_cpu(word0);
6944 } else {
6945 /* First read mbox status word */
6946 if (lpfc_readl(phba->MBslimaddr, &word0)) {
6947 spin_unlock_irqrestore(&phba->hbalock,
6948 drvr_flag);
6949 goto out_not_finished;
6950 }
6951 }
6952
6953 /* Read the HBA Host Attention Register */
6954 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6955 spin_unlock_irqrestore(&phba->hbalock,
6956 drvr_flag);
6957 goto out_not_finished;
6958 }
6959 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6960 1000) + jiffies;
6961 i = 0;
6962 /* Wait for command to complete */
6963 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6964 (!(ha_copy & HA_MBATT) &&
6965 (phba->link_state > LPFC_WARM_START))) {
6966 if (time_after(jiffies, timeout)) {
6967 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6968 spin_unlock_irqrestore(&phba->hbalock,
6969 drvr_flag);
6970 goto out_not_finished;
6971 }
6972
6973 /* Check if we took a mbox interrupt while we were
6974 polling */
6975 if (((word0 & OWN_CHIP) != OWN_CHIP)
6976 && (evtctr != psli->slistat.mbox_event))
6977 break;
6978
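			/* After ~10 tight polls, back off: drop the lock,
			 * sleep 1ms, then re-acquire before polling again.
			 */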
6979 if (i++ > 10) {
6980 spin_unlock_irqrestore(&phba->hbalock,
6981 drvr_flag);
6982 msleep(1);
6983 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6984 }
6985
6986 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
6987 /* First copy command data */
6988 word0 = *((uint32_t *)phba->mbox);
6989 word0 = le32_to_cpu(word0);
6990 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
6991 MAILBOX_t *slimmb;
6992 uint32_t slimword0;
6993 /* Check real SLIM for any errors */
6994 slimword0 = readl(phba->MBslimaddr);
6995 slimmb = (MAILBOX_t *) & slimword0;
6996 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6997 && slimmb->mbxStatus) {
6998 psli->sli_flag &=
6999 ~LPFC_SLI_ACTIVE;
7000 word0 = slimword0;
7001 }
7002 }
7003 } else {
7004 /* First copy command data */
7005 word0 = readl(phba->MBslimaddr);
7006 }
7007 /* Read the HBA Host Attention Register */
7008 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7009 spin_unlock_irqrestore(&phba->hbalock,
7010 drvr_flag);
7011 goto out_not_finished;
7012 }
7013 }
7014
7015 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7016 /* copy results back to user */
7017 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7018 /* Copy the mailbox extension data */
7019 if (pmbox->out_ext_byte_len && pmbox->context2) {
7020 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7021 pmbox->context2,
7022 pmbox->out_ext_byte_len);
7023 }
7024 } else {
7025 /* First copy command data */
7026 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7027 MAILBOX_CMD_SIZE);
7028 /* Copy the mailbox extension data */
7029 if (pmbox->out_ext_byte_len && pmbox->context2) {
7030 lpfc_memcpy_from_slim(pmbox->context2,
7031 phba->MBslimaddr +
7032 MAILBOX_HBA_EXT_OFFSET,
7033 pmbox->out_ext_byte_len);
7034 }
7035 }
7036
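		/* Acknowledge (clear) the mailbox attention condition */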
7037 writel(HA_MBATT, phba->HAregaddr);
7038 readl(phba->HAregaddr); /* flush */
7039
7040 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7041 status = mbx->mbxStatus;
7042 }
7043
7044 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7045 return status;
7046
7047 out_not_finished:
7048 if (processing_queue) {
7049 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7050 lpfc_mbox_cmpl_put(phba, pmbox);
7051 }
7052 return MBX_NOT_FINISHED;
7053 }
7054
7055 /**
7056 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7057 * @phba: Pointer to HBA context object.
7058 *
7059 * The function blocks the posting of SLI4 asynchronous mailbox commands from
7060 * the driver internal pending mailbox queue. It will then try to wait out the
7061 * possible outstanding mailbox command before returning.
7062 *
7063 * Returns:
7064 * 0 - the outstanding mailbox command (if any) completed.
7065 * 1 - the wait for the outstanding mailbox command timed out.
7066 **/
7067 static int
7068 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7069 {
7070 struct lpfc_sli *psli = &phba->sli;
7071 int rc = 0;
7072 unsigned long timeout = 0;
7073
7074 /* Mark the asynchronous mailbox command posting as blocked */
7075 spin_lock_irq(&phba->hbalock);
7076 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7077 /* Determine how long we might wait for the active mailbox
7078 * command to be gracefully completed by firmware.
7079 */
7080 if (phba->sli.mbox_active)
7081 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7082 phba->sli.mbox_active) *
7083 1000) + jiffies;
7084 spin_unlock_irq(&phba->hbalock);
7085
7086 /* Wait for the outstanding mailbox command to complete */
7087 while (phba->sli.mbox_active) {
7088 /* Check active mailbox complete status every 2ms */
7089 msleep(2);
7090 if (time_after(jiffies, timeout)) {
7091 /* Timed out, mark the outstanding cmd as not complete */
7092 rc = 1;
7093 break;
7094 }
7095 }
7096
7097 /* Cannot cleanly block async mailbox commands, fail the request */
7098 if (rc) {
7099 spin_lock_irq(&phba->hbalock);
7100 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7101 spin_unlock_irq(&phba->hbalock);
7102 }
7103 return rc;
7104 }
7105
7106 /**
7107 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
7108 * @phba: Pointer to HBA context object.
7109 *
7110 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7111 * commands from the driver internal pending mailbox queue. It makes sure
7112 * that there is no outstanding mailbox command before resuming posting
7113 * asynchronous mailbox commands. If, for any reason, there is an
7114 * outstanding mailbox command, it will try to wait it out before resuming
7115 * asynchronous mailbox command posting.
7116 **/
7117 static void
7118 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7119 {
7120 struct lpfc_sli *psli = &phba->sli;
7121
7122 spin_lock_irq(&phba->hbalock);
7123 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7124 /* Asynchronous mailbox posting is not blocked, do nothing */
7125 spin_unlock_irq(&phba->hbalock);
7126 return;
7127 }
7128
7129 /* The outstanding synchronous mailbox command is guaranteed to be done,
7130 * whether it completed successfully or timed out. After a timeout the
7131 * outstanding command is always removed, so just unblock posting of async
7132 * mailbox commands and resume.
7133 */
7134 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7135 spin_unlock_irq(&phba->hbalock);
7136
7137 /* wake up worker thread to post asynchronous mailbox command */
7138 lpfc_worker_wake_up(phba);
7139 }
7140
7141 /**
7142 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7143 * @phba: Pointer to HBA context object.
7144 * @mboxq: Pointer to mailbox object.
7145 *
7146 * The function waits for the bootstrap mailbox register ready bit from the
7147 * port, for up to the regular mailbox command timeout value. Returns:
7148 *
7149 * 0 - no timeout waiting for the bootstrap mailbox register to be ready.
7150 * MBXERR_ERROR - the wait for the bootstrap mailbox register timed out.
7151 **/
7152 static int
7153 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7154 {
7155 uint32_t db_ready;
7156 unsigned long timeout;
7157 struct lpfc_register bmbx_reg;
7158
7159 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7160 * 1000) + jiffies;
7161
7162 do {
7163 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7164 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7165 if (!db_ready)
7166 msleep(2);
7167
7168 if (time_after(jiffies, timeout))
7169 return MBXERR_ERROR;
7170 } while (!db_ready);
7171
7172 return 0;
7173 }
7174
7175 /**
7176 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7177 * @phba: Pointer to HBA context object.
7178 * @mboxq: Pointer to mailbox object.
7179 *
7180 * The function posts a mailbox to the port. The mailbox is expected
7181 * to be completely filled in and ready for the port to operate on it.
7182 * This routine executes a synchronous completion operation on the
7183 * mailbox by polling for its completion.
7184 *
7185 * The caller must not be holding any locks when calling this routine.
7186 *
7187 * Returns:
7188 * MBX_SUCCESS - mailbox posted successfully
7189 * Any of the MBX error values.
7190 **/
7191 static int
7192 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7193 {
7194 int rc = MBX_SUCCESS;
7195 unsigned long iflag;
7196 uint32_t mcqe_status;
7197 uint32_t mbx_cmnd;
7198 struct lpfc_sli *psli = &phba->sli;
7199 struct lpfc_mqe *mb = &mboxq->u.mqe;
7200 struct lpfc_bmbx_create *mbox_rgn;
7201 struct dma_address *dma_address;
7202
7203 /*
7204 * Only one mailbox can be active to the bootstrap mailbox region
7205 * at a time and there is no queueing provided.
7206 */
7207 spin_lock_irqsave(&phba->hbalock, iflag);
7208 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7209 spin_unlock_irqrestore(&phba->hbalock, iflag);
7210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7211 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7212 "cannot issue Data: x%x x%x\n",
7213 mboxq->vport ? mboxq->vport->vpi : 0,
7214 mboxq->u.mb.mbxCommand,
7215 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7216 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7217 psli->sli_flag, MBX_POLL);
7218 return MBXERR_ERROR;
7219 }
7220 /* The server grabs the token and owns it until release */
7221 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7222 phba->sli.mbox_active = mboxq;
7223 spin_unlock_irqrestore(&phba->hbalock, iflag);
7224
7225 /* Wait for the bootstrap mailbox register to become ready */
7226 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7227 if (rc)
7228 goto exit;
7229
7230 /*
7231 * Initialize the bootstrap memory region to avoid stale data areas
7232 * in the mailbox post. Then copy the caller's mailbox contents to
7233 * the bmbx mailbox region.
7234 */
7235 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7236 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7237 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7238 sizeof(struct lpfc_mqe));
7239
7240 /* Post the high mailbox dma address to the port and wait for ready. */
7241 dma_address = &phba->sli4_hba.bmbx.dma_address;
7242 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7243
7244 /* Wait for the bootstrap mailbox register after the high-address write */
7245 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7246 if (rc)
7247 goto exit;
7248
7249 /* Post the low mailbox dma address to the port. */
7250 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7251
7252 /* Wait for the bootstrap mailbox register after the low-address write */
7253 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7254 if (rc)
7255 goto exit;
7256
7257 /*
7258 * Read the CQ to ensure the mailbox has completed.
7259 * If so, update the mailbox status so that the upper layers
7260 * can complete the request normally.
7261 */
7262 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7263 sizeof(struct lpfc_mqe));
7264 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7265 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7266 sizeof(struct lpfc_mcqe));
7267 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7268 /*
7269 * When the CQE status indicates a failure and the mailbox status
7270 * indicates success then copy the CQE status into the mailbox status
7271 * (and prefix it with x4000).
7272 */
7273 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7274 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7275 bf_set(lpfc_mqe_status, mb,
7276 (LPFC_MBX_ERROR_RANGE | mcqe_status));
7277 rc = MBXERR_ERROR;
7278 } else
7279 lpfc_sli4_swap_str(phba, mboxq);
7280
7281 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7282 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7283 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7284 " x%x x%x CQ: x%x x%x x%x x%x\n",
7285 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7286 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7287 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7288 bf_get(lpfc_mqe_status, mb),
7289 mb->un.mb_words[0], mb->un.mb_words[1],
7290 mb->un.mb_words[2], mb->un.mb_words[3],
7291 mb->un.mb_words[4], mb->un.mb_words[5],
7292 mb->un.mb_words[6], mb->un.mb_words[7],
7293 mb->un.mb_words[8], mb->un.mb_words[9],
7294 mb->un.mb_words[10], mb->un.mb_words[11],
7295 mb->un.mb_words[12], mboxq->mcqe.word0,
7296 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7297 mboxq->mcqe.trailer);
7298 exit:
7299 /* We are holding the token, so nothing else can race the release */
7300 spin_lock_irqsave(&phba->hbalock, iflag);
7301 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7302 phba->sli.mbox_active = NULL;
7303 spin_unlock_irqrestore(&phba->hbalock, iflag);
7304 return rc;
7305 }
7306
7307 /**
7308 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
7309 * @phba: Pointer to HBA context object.
7310 * @pmbox: Pointer to mailbox object.
7311 * @flag: Flag indicating how the mailbox needs to be processed.
7312 *
7313 * This function is called by discovery code and HBA management code to submit
7314 * a mailbox command to firmware with SLI-4 interface spec.
7315 *
7316 * Return codes: the caller owns the mailbox command after the function
7317 * returns.
7318 **/
7319 static int
7320 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7321 uint32_t flag)
7322 {
7323 struct lpfc_sli *psli = &phba->sli;
7324 unsigned long iflags;
7325 int rc;
7326
7327 /* dump from issue mailbox command if setup */
7328 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7329
7330 rc = lpfc_mbox_dev_check(phba);
7331 if (unlikely(rc)) {
7332 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7333 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7334 "cannot issue Data: x%x x%x\n",
7335 mboxq->vport ? mboxq->vport->vpi : 0,
7336 mboxq->u.mb.mbxCommand,
7337 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7338 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7339 psli->sli_flag, flag);
7340 goto out_not_finished;
7341 }
7342
7343 /* Detect polling mode and jump to a handler */
7344 if (!phba->sli4_hba.intr_enable) {
7345 if (flag == MBX_POLL)
7346 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7347 else
7348 rc = -EIO;
7349 if (rc != MBX_SUCCESS)
7350 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7351 "(%d):2541 Mailbox command x%x "
7352 "(x%x/x%x) failure: "
7353 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7354 "Data: x%x x%x\n,",
7355 mboxq->vport ? mboxq->vport->vpi : 0,
7356 mboxq->u.mb.mbxCommand,
7357 lpfc_sli_config_mbox_subsys_get(phba,
7358 mboxq),
7359 lpfc_sli_config_mbox_opcode_get(phba,
7360 mboxq),
7361 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7362 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7363 bf_get(lpfc_mcqe_ext_status,
7364 &mboxq->mcqe),
7365 psli->sli_flag, flag);
7366 return rc;
7367 } else if (flag == MBX_POLL) {
7368 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7369 "(%d):2542 Try to issue mailbox command "
7370 "x%x (x%x/x%x) synchronously ahead of async"
7371 "mailbox command queue: x%x x%x\n",
7372 mboxq->vport ? mboxq->vport->vpi : 0,
7373 mboxq->u.mb.mbxCommand,
7374 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7375 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7376 psli->sli_flag, flag);
7377 /* Try to block the asynchronous mailbox posting */
7378 rc = lpfc_sli4_async_mbox_block(phba);
7379 if (!rc) {
7380 /* Successfully blocked, now issue sync mbox cmd */
7381 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7382 if (rc != MBX_SUCCESS)
7383 lpfc_printf_log(phba, KERN_WARNING,
7384 LOG_MBOX | LOG_SLI,
7385 "(%d):2597 Sync Mailbox command "
7386 "x%x (x%x/x%x) failure: "
7387 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7388 "Data: x%x x%x\n,",
7389 mboxq->vport ? mboxq->vport->vpi : 0,
7390 mboxq->u.mb.mbxCommand,
7391 lpfc_sli_config_mbox_subsys_get(phba,
7392 mboxq),
7393 lpfc_sli_config_mbox_opcode_get(phba,
7394 mboxq),
7395 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7396 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7397 bf_get(lpfc_mcqe_ext_status,
7398 &mboxq->mcqe),
7399 psli->sli_flag, flag);
7400 /* Unblock the async mailbox posting afterward */
7401 lpfc_sli4_async_mbox_unblock(phba);
7402 }
7403 return rc;
7404 }
7405
7406 /* Now, interrupt mode asynchronous mailbox command */
7407 rc = lpfc_mbox_cmd_check(phba, mboxq);
7408 if (rc) {
7409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7410 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7411 "cannot issue Data: x%x x%x\n",
7412 mboxq->vport ? mboxq->vport->vpi : 0,
7413 mboxq->u.mb.mbxCommand,
7414 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7415 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7416 psli->sli_flag, flag);
7417 goto out_not_finished;
7418 }
7419
7420 /* Put the mailbox command to the driver internal FIFO */
7421 psli->slistat.mbox_busy++;
7422 spin_lock_irqsave(&phba->hbalock, iflags);
7423 lpfc_mbox_put(phba, mboxq);
7424 spin_unlock_irqrestore(&phba->hbalock, iflags);
7425 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7426 "(%d):0354 Mbox cmd issue - Enqueue Data: "
7427 "x%x (x%x/x%x) x%x x%x x%x\n",
7428 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7429 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7430 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7431 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7432 phba->pport->port_state,
7433 psli->sli_flag, MBX_NOWAIT);
7434 /* Wake up worker thread to transport mailbox command from head */
7435 lpfc_worker_wake_up(phba);
7436
7437 return MBX_BUSY;
7438
7439 out_not_finished:
7440 return MBX_NOT_FINISHED;
7441 }
7442
7443 /**
7444 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7445 * @phba: Pointer to HBA context object.
7446 *
7447 * This function is called by worker thread to send a mailbox command to
7448 * SLI4 HBA firmware.
7449 *
7450 **/
7451 int
7452 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7453 {
7454 struct lpfc_sli *psli = &phba->sli;
7455 LPFC_MBOXQ_t *mboxq;
7456 int rc = MBX_SUCCESS;
7457 unsigned long iflags;
7458 struct lpfc_mqe *mqe;
7459 uint32_t mbx_cmnd;
7460
7461 /* Check interrupt mode before posting the async mailbox command */
7462 if (unlikely(!phba->sli4_hba.intr_enable))
7463 return MBX_NOT_FINISHED;
7464
7465 /* Check for mailbox command service token */
7466 spin_lock_irqsave(&phba->hbalock, iflags);
7467 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7468 spin_unlock_irqrestore(&phba->hbalock, iflags);
7469 return MBX_NOT_FINISHED;
7470 }
7471 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7472 spin_unlock_irqrestore(&phba->hbalock, iflags);
7473 return MBX_NOT_FINISHED;
7474 }
7475 if (unlikely(phba->sli.mbox_active)) {
7476 spin_unlock_irqrestore(&phba->hbalock, iflags);
7477 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7478 "0384 There is pending active mailbox cmd\n");
7479 return MBX_NOT_FINISHED;
7480 }
7481 /* Take the mailbox command service token */
7482 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7483
7484 /* Get the next mailbox command from head of queue */
7485 mboxq = lpfc_mbox_get(phba);
7486
7487 /* If no more mailbox commands are waiting to be posted, we're done */
7488 if (!mboxq) {
7489 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7490 spin_unlock_irqrestore(&phba->hbalock, iflags);
7491 return MBX_SUCCESS;
7492 }
7493 phba->sli.mbox_active = mboxq;
7494 spin_unlock_irqrestore(&phba->hbalock, iflags);
7495
7496 /* Check device readiness for posting mailbox command */
7497 rc = lpfc_mbox_dev_check(phba);
7498 if (unlikely(rc))
7499 /* Driver clean routine will clean up pending mailbox */
7500 goto out_not_finished;
7501
7502 /* Prepare the mbox command to be posted */
7503 mqe = &mboxq->u.mqe;
7504 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7505
7506 /* Start timer for the mbox_tmo and log some mailbox post messages */
7507 mod_timer(&psli->mbox_tmo, (jiffies +
7508 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7509
7510 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7511 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7512 "x%x x%x\n",
7513 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7514 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7515 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7516 phba->pport->port_state, psli->sli_flag);
7517
7518 if (mbx_cmnd != MBX_HEARTBEAT) {
7519 if (mboxq->vport) {
7520 lpfc_debugfs_disc_trc(mboxq->vport,
7521 LPFC_DISC_TRC_MBOX_VPORT,
7522 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7523 mbx_cmnd, mqe->un.mb_words[0],
7524 mqe->un.mb_words[1]);
7525 } else {
7526 lpfc_debugfs_disc_trc(phba->pport,
7527 LPFC_DISC_TRC_MBOX,
7528 "MBOX Send: cmd:x%x mb:x%x x%x",
7529 mbx_cmnd, mqe->un.mb_words[0],
7530 mqe->un.mb_words[1]);
7531 }
7532 }
7533 psli->slistat.mbox_cmd++;
7534
7535 /* Post the mailbox command to the port */
7536 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7537 if (rc != MBX_SUCCESS) {
7538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7539 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7540 "cannot issue Data: x%x x%x\n",
7541 mboxq->vport ? mboxq->vport->vpi : 0,
7542 mboxq->u.mb.mbxCommand,
7543 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7544 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7545 psli->sli_flag, MBX_NOWAIT);
7546 goto out_not_finished;
7547 }
7548
7549 return rc;
7550
7551 out_not_finished:
7552 spin_lock_irqsave(&phba->hbalock, iflags);
7553 if (phba->sli.mbox_active) {
7554 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7555 __lpfc_mbox_cmpl_put(phba, mboxq);
7556 /* Release the token */
7557 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7558 phba->sli.mbox_active = NULL;
7559 }
7560 spin_unlock_irqrestore(&phba->hbalock, iflags);
7561
7562 return MBX_NOT_FINISHED;
7563 }
7564
7565 /**
7566 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7567 * @phba: Pointer to HBA context object.
7568 * @pmbox: Pointer to mailbox object.
7569 * @flag: Flag indicating how the mailbox needs to be processed.
7570 *
7571 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
7572 * the API jump table function pointer from the lpfc_hba struct.
7573 *
7574 * Return codes: the caller owns the mailbox command after the function
7575 * returns.
7576 **/
7577 int
7578 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7579 {
7580 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7581 }
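
/*
 * Illustrative caller sketch (not part of this file's flow): a typical
 * no-wait use of the wrapper above, assuming the usual lpfc convention of
 * allocating the mailbox from phba->mbox_mem_pool and preparing it with a
 * helper such as lpfc_config_link() before issuing it.
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		lpfc_config_link(phba, mbox);
 *		mbox->vport = vport;
 *		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */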
7582
7583 /**
7584 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
7585 * @phba: The hba struct for which this call is being executed.
7586 * @dev_grp: The HBA PCI-Device group number.
7587 *
7588 * This routine sets up the mbox interface API function jump table in @phba
7589 * struct.
7590 * Returns: 0 - success, -ENODEV - failure.
7591 **/
7592 int
7593 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7594 {
7595
7596 switch (dev_grp) {
7597 case LPFC_PCI_DEV_LP:
7598 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7599 phba->lpfc_sli_handle_slow_ring_event =
7600 lpfc_sli_handle_slow_ring_event_s3;
7601 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7602 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7603 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7604 break;
7605 case LPFC_PCI_DEV_OC:
7606 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7607 phba->lpfc_sli_handle_slow_ring_event =
7608 lpfc_sli_handle_slow_ring_event_s4;
7609 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7610 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7611 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7612 break;
7613 default:
7614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7615 "1420 Invalid HBA PCI-device group: 0x%x\n",
7616 dev_grp);
7617 return -ENODEV;
7618 break;
7619 }
7620 return 0;
7621 }
7622
7623 /**
7624 * __lpfc_sli_ringtx_put - Add an iocb to the txq
7625 * @phba: Pointer to HBA context object.
7626 * @pring: Pointer to driver SLI ring object.
7627 * @piocb: Pointer to the command iocb to be added to the txq.
7628 *
7629 * This function is called with hbalock held to add a command
7630 * iocb to the txq when SLI layer cannot submit the command iocb
7631 * to the ring.
7632 **/
7633 void
7634 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7635 struct lpfc_iocbq *piocb)
7636 {
7637 /* Insert the caller's iocb in the txq tail for later processing. */
7638 list_add_tail(&piocb->list, &pring->txq);
7639 }
7640
7641 /**
7642 * lpfc_sli_next_iocb - Get the next iocb in the txq
7643 * @phba: Pointer to HBA context object.
7644 * @pring: Pointer to driver SLI ring object.
7645 * @piocb: Pointer to the address of the caller's command iocb.
7646 *
7647 * This function is called with hbalock held before a new
7648 * iocb is submitted to the firmware. It checks the txq so that
7649 * any iocbs already queued there are flushed to the firmware
7650 * before new iocbs are submitted.
7651 * If there are iocbs in the txq which need to be submitted
7652 * to the firmware, lpfc_sli_next_iocb dequeues the first element
7653 * of the txq and returns it.
7654 * If the txq is empty, the function returns *piocb and sets
7655 * *piocb to NULL. The caller checks *piocb to determine whether
7656 * there are more commands to submit.
7657 **/
7658 static struct lpfc_iocbq *
7659 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7660 struct lpfc_iocbq **piocb)
7661 {
7662 struct lpfc_iocbq * nextiocb;
7663
7664 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7665 if (!nextiocb) {
7666 nextiocb = *piocb;
7667 *piocb = NULL;
7668 }
7669
7670 return nextiocb;
7671 }
7672
7673 /**
7674 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
7675 * @phba: Pointer to HBA context object.
7676 * @ring_number: SLI ring number to issue iocb on.
7677 * @piocb: Pointer to command iocb.
7678 * @flag: Flag indicating if this command can be put into txq.
7679 *
7680 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7681 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7682 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7683 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7684 * this function allows only iocbs for posting buffers. This function finds
7685 * next available slot in the command ring and posts the command to the
7686 * available slot and writes the port attention register to request HBA start
7687 * processing new iocb. If there is no slot available in the ring and
7688 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7689 * the function returns IOCB_BUSY.
7690 *
7691 * This function is called with hbalock held. The function will return success
7692 * after it successfully submits the iocb to firmware or after adding it to the
7693 * txq.
7694 **/
7695 static int
7696 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
7697 struct lpfc_iocbq *piocb, uint32_t flag)
7698 {
7699 struct lpfc_iocbq *nextiocb;
7700 IOCB_t *iocb;
7701 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
7702
7703 if (piocb->iocb_cmpl && (!piocb->vport) &&
7704 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7705 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7706 lpfc_printf_log(phba, KERN_ERR,
7707 LOG_SLI | LOG_VPORT,
7708 "1807 IOCB x%x failed. No vport\n",
7709 piocb->iocb.ulpCommand);
7710 dump_stack();
7711 return IOCB_ERROR;
7712 }
7713
7714
7715 /* If the PCI channel is in offline state, do not post iocbs. */
7716 if (unlikely(pci_channel_offline(phba->pcidev)))
7717 return IOCB_ERROR;
7718
7719 /* If HBA has a deferred error attention, fail the iocb. */
7720 if (unlikely(phba->hba_flag & DEFER_ERATT))
7721 return IOCB_ERROR;
7722
7723 /*
7724 * We should never get an IOCB if we are in a < LINK_DOWN state
7725 */
7726 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7727 return IOCB_ERROR;
7728
7729 /*
7730 * Check to see if we are blocking IOCB processing because of an
7731 * outstanding event.
7732 */
7733 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
7734 goto iocb_busy;
7735
7736 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
7737 /*
7738 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
7739 * can be issued if the link is not up.
7740 */
7741 switch (piocb->iocb.ulpCommand) {
7742 case CMD_GEN_REQUEST64_CR:
7743 case CMD_GEN_REQUEST64_CX:
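		/*
		 * With the link down, GEN_REQUEST64 is only allowed for
		 * Menlo maintenance-mode unsolicited command traffic;
		 * anything else must wait for the link to come up.
		 */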
7744 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7745 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
7746 FC_RCTL_DD_UNSOL_CMD) ||
7747 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7748 MENLO_TRANSPORT_TYPE))
7749
7750 goto iocb_busy;
7751 break;
7752 case CMD_QUE_RING_BUF_CN:
7753 case CMD_QUE_RING_BUF64_CN:
7754 /*
7755 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7756 * completion, iocb_cmpl MUST be 0.
7757 */
7758 if (piocb->iocb_cmpl)
7759 piocb->iocb_cmpl = NULL;
7760 /*FALLTHROUGH*/
7761 case CMD_CREATE_XRI_CR:
7762 case CMD_CLOSE_XRI_CN:
7763 case CMD_CLOSE_XRI_CX:
7764 break;
7765 default:
7766 goto iocb_busy;
7767 }
7768
7769 /*
7770 * For FCP commands, we must be in a state where we can process link
7771 * attention events.
7772 */
7773 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
7774 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
7775 goto iocb_busy;
7776 }
7777
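	/*
	 * Drain any previously queued iocbs from the txq first, then the
	 * caller's iocb, for as long as command ring slots are available.
	 */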
7778 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7779 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7780 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7781
7782 if (iocb)
7783 lpfc_sli_update_ring(phba, pring);
7784 else
7785 lpfc_sli_update_full_ring(phba, pring);
7786
7787 if (!piocb)
7788 return IOCB_SUCCESS;
7789
7790 goto out_busy;
7791
7792 iocb_busy:
7793 pring->stats.iocb_cmd_delay++;
7794
7795 out_busy:
7796
7797 if (!(flag & SLI_IOCB_RET_IOCB)) {
7798 __lpfc_sli_ringtx_put(phba, pring, piocb);
7799 return IOCB_SUCCESS;
7800 }
7801
7802 return IOCB_BUSY;
7803 }
7804
7805 /**
7806 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7807 * @phba: Pointer to HBA context object.
7808 * @piocb: Pointer to command iocb.
7809 * @sglq: Pointer to the scatter gather queue object.
7810 *
7811 * This routine converts the bpl or bde that is in the IOCB
7812 * to a sgl list for the sli4 hardware. The physical address
7813 * of the bpl/bde is converted back to a virtual address.
7814 * If the IOCB contains a BPL then the list of BDE's is
7815 * converted to sli4_sge's. If the IOCB contains a single
7816 * BDE then it is converted to a single sli_sge.
7817 * The IOCB is still in CPU endianness, so the contents of
7818 * the bpl can be used without byte swapping.
7819 *
7820 * Returns valid XRI = Success, NO_XRI = Failure.
7821 **/
7822 static uint16_t
7823 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7824 struct lpfc_sglq *sglq)
7825 {
7826 uint16_t xritag = NO_XRI;
7827 struct ulp_bde64 *bpl = NULL;
7828 struct ulp_bde64 bde;
7829 struct sli4_sge *sgl = NULL;
7830 struct lpfc_dmabuf *dmabuf;
7831 IOCB_t *icmd;
7832 int numBdes = 0;
7833 int i = 0;
7834 uint32_t offset = 0; /* accumulated offset in the sg request list */
7835 int inbound = 0; /* number of sg reply entries inbound from firmware */
7836
7837 if (!piocbq || !sglq)
7838 return xritag;
7839
7840 sgl = (struct sli4_sge *)sglq->sgl;
7841 icmd = &piocbq->iocb;
7842 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
7843 return sglq->sli4_xritag;
7844 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7845 numBdes = icmd->un.genreq64.bdl.bdeSize /
7846 sizeof(struct ulp_bde64);
7847 /* The addrHigh and addrLow fields within the IOCB
7848 * have not been byteswapped yet so there is no
7849 * need to swap them back.
7850 */
7851 if (piocbq->context3)
7852 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
7853 else
7854 return xritag;
7855
7856 bpl = (struct ulp_bde64 *)dmabuf->virt;
7857 if (!bpl)
7858 return xritag;
7859
7860 for (i = 0; i < numBdes; i++) {
7861 /* Should already be byte swapped. */
7862 sgl->addr_hi = bpl->addrHigh;
7863 sgl->addr_lo = bpl->addrLow;
7864
7865 sgl->word2 = le32_to_cpu(sgl->word2);
7866 if ((i+1) == numBdes)
7867 bf_set(lpfc_sli4_sge_last, sgl, 1);
7868 else
7869 bf_set(lpfc_sli4_sge_last, sgl, 0);
7870 /* swap the size field back to the cpu so we
7871 * can assign it to the sgl.
7872 */
7873 bde.tus.w = le32_to_cpu(bpl->tus.w);
7874 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
7875 /* The offsets in the sgl need to be accumulated
7876 * separately for the request and reply lists.
7877 * The request is always first, the reply follows.
7878 */
7879 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7880 /* add up the reply sg entries */
7881 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7882 inbound++;
7883 /* first inbound? reset the offset */
7884 if (inbound == 1)
7885 offset = 0;
7886 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7887 bf_set(lpfc_sli4_sge_type, sgl,
7888 LPFC_SGE_TYPE_DATA);
7889 offset += bde.tus.f.bdeSize;
7890 }
7891 sgl->word2 = cpu_to_le32(sgl->word2);
7892 bpl++;
7893 sgl++;
7894 }
7895 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7896 /* The addrHigh and addrLow fields of the BDE have not
7897 * been byteswapped yet so they need to be swapped
7898 * before putting them in the sgl.
7899 */
7900 sgl->addr_hi =
7901 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7902 sgl->addr_lo =
7903 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
7904 sgl->word2 = le32_to_cpu(sgl->word2);
7905 bf_set(lpfc_sli4_sge_last, sgl, 1);
7906 sgl->word2 = cpu_to_le32(sgl->word2);
7907 sgl->sge_len =
7908 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
7909 }
7910 return sglq->sli4_xritag;
7911 }
7912
7913 /**
7914 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
7915 * @phba: Pointer to HBA context object.
7916 *
7917 * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
7918 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
7919 * held.
7920 *
7921 * Return: index into SLI4 fast-path FCP queue index.
7922 **/
7923 static inline uint32_t
7924 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7925 {
7926 struct lpfc_vector_map_info *cpup;
7927 int chann, cpu;
7928
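	/*
	 * With CPU-based scheduling, the issuing CPU maps directly to the
	 * FCP WQ channel assigned to it in the driver's CPU map.
	 */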
7929 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
7930 cpu = smp_processor_id();
7931 if (cpu < phba->sli4_hba.num_present_cpu) {
7932 cpup = phba->sli4_hba.cpu_map;
7933 cpup += cpu;
7934 return cpup->channel_id;
7935 }
7936 chann = cpu;
7937 }
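	/*
	 * Otherwise fall back to simple round-robin across the configured
	 * FCP I/O channels using the atomic queue-index counter.
	 */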
7938 chann = atomic_add_return(1, &phba->fcp_qidx);
7939 chann = (chann % phba->cfg_fcp_io_channel);
7940 return chann;
7941 }
7942
7943 /**
7944 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
7945 * @phba: Pointer to HBA context object.
7946 * @iocbq: Pointer to command iocb.
7947 * @wqe: Pointer to the work queue entry.
7948 *
7949 * This routine converts the iocb command to its Work Queue Entry
7950 * equivalent. The wqe pointer should not have any fields set when
7951 * this routine is called because it will memcpy over them.
7952 * This routine does not set the CQ_ID or the WQEC bits in the
7953 * wqe.
7954 *
7955 * Returns: 0 = Success, IOCB_ERROR = Failure.
7956 **/
7957 static int
7958 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7959 union lpfc_wqe *wqe)
7960 {
7961 uint32_t xmit_len = 0, total_len = 0;
7962 uint8_t ct = 0;
7963 uint32_t fip;
7964 uint32_t abort_tag;
7965 uint8_t command_type = ELS_COMMAND_NON_FIP;
7966 uint8_t cmnd;
7967 uint16_t xritag;
7968 uint16_t abrt_iotag;
7969 struct lpfc_iocbq *abrtiocbq;
7970 struct ulp_bde64 *bpl = NULL;
7971 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
7972 int numBdes, i;
7973 struct ulp_bde64 bde;
7974 struct lpfc_nodelist *ndlp;
7975 uint32_t *pcmd;
7976 uint32_t if_type;
7977
7978 fip = phba->hba_flag & HBA_FIP_SUPPORT;
7979 /* The fcp commands will set command type */
7980 if (iocbq->iocb_flag & LPFC_IO_FCP)
7981 command_type = FCP_COMMAND;
7982 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
7983 command_type = ELS_COMMAND_FIP;
7984 else
7985 command_type = ELS_COMMAND_NON_FIP;
7986
7987 /* Some of the fields are in the right position already */
7988 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7989 abort_tag = (uint32_t) iocbq->iotag;
7990 xritag = iocbq->sli4_xritag;
7991 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
7992 /* words0-2 bpl convert bde */
7993 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7994 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7995 sizeof(struct ulp_bde64);
7996 bpl = (struct ulp_bde64 *)
7997 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7998 if (!bpl)
7999 return IOCB_ERROR;
8000
8001 /* Should already be byte swapped. */
8002 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8003 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8004 /* swap the size field back to the cpu so we
8005 * can assign it to the sgl.
8006 */
8007 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8008 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8009 total_len = 0;
8010 for (i = 0; i < numBdes; i++) {
8011 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8012 total_len += bde.tus.f.bdeSize;
8013 }
8014 } else
8015 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8016
8017 iocbq->iocb.ulpIoTag = iocbq->iotag;
8018 cmnd = iocbq->iocb.ulpCommand;
8019
8020 switch (iocbq->iocb.ulpCommand) {
8021 case CMD_ELS_REQUEST64_CR:
8022 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8023 ndlp = iocbq->context_un.ndlp;
8024 else
8025 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8026 if (!iocbq->iocb.ulpLe) {
8027 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8028 "2007 Only Limited Edition cmd Format"
8029 " supported 0x%x\n",
8030 iocbq->iocb.ulpCommand);
8031 return IOCB_ERROR;
8032 }
8033
8034 wqe->els_req.payload_len = xmit_len;
8035 /* Els_request64 has a TMO */
8036 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8037 iocbq->iocb.ulpTimeout);
8038 /* Need a VF for word 4 set the vf bit*/
8039 bf_set(els_req64_vf, &wqe->els_req, 0);
8040 /* And a VFID for word 12 */
8041 bf_set(els_req64_vfid, &wqe->els_req, 0);
8042 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8043 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8044 iocbq->iocb.ulpContext);
8045 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8046 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8047 /* CCP CCPE PV PRI in word10 were set in the memcpy */
8048 if (command_type == ELS_COMMAND_FIP)
8049 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8050 >> LPFC_FIP_ELS_ID_SHIFT);
8051 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8052 iocbq->context2)->virt);
8053 if_type = bf_get(lpfc_sli_intf_if_type,
8054 &phba->sli4_hba.sli_intf);
8055 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8056 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8057 *pcmd == ELS_CMD_SCR ||
8058 *pcmd == ELS_CMD_FDISC ||
8059 *pcmd == ELS_CMD_LOGO ||
8060 *pcmd == ELS_CMD_PLOGI)) {
8061 bf_set(els_req64_sp, &wqe->els_req, 1);
8062 bf_set(els_req64_sid, &wqe->els_req,
8063 iocbq->vport->fc_myDID);
8064 if ((*pcmd == ELS_CMD_FLOGI) &&
8065 !(phba->fc_topology ==
8066 LPFC_TOPOLOGY_LOOP))
8067 bf_set(els_req64_sid, &wqe->els_req, 0);
8068 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8069 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8070 phba->vpi_ids[iocbq->vport->vpi]);
8071 } else if (pcmd && iocbq->context1) {
8072 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8073 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8074 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8075 }
8076 }
8077 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8078 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8079 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8080 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8081 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8082 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8083 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8084 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8085 break;
8086 case CMD_XMIT_SEQUENCE64_CX:
8087 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8088 iocbq->iocb.un.ulpWord[3]);
8089 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8090 iocbq->iocb.unsli3.rcvsli3.ox_id);
8091 /* The entire sequence is transmitted for this IOCB */
8092 xmit_len = total_len;
8093 cmnd = CMD_XMIT_SEQUENCE64_CR;
8094 if (phba->link_flag & LS_LOOPBACK_MODE)
8095 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
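		/*FALLTHROUGH*/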
8096 case CMD_XMIT_SEQUENCE64_CR:
8097 /* word3 iocb=io_tag32 wqe=reserved */
8098 wqe->xmit_sequence.rsvd3 = 0;
8099 /* word4 relative_offset memcpy */
8100 /* word5 r_ctl/df_ctl memcpy */
8101 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8102 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8103 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8104 LPFC_WQE_IOD_WRITE);
8105 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8106 LPFC_WQE_LENLOC_WORD12);
8107 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8108 wqe->xmit_sequence.xmit_len = xmit_len;
8109 command_type = OTHER_COMMAND;
8110 break;
8111 case CMD_XMIT_BCAST64_CN:
8112 /* word3 iocb=iotag32 wqe=seq_payload_len */
8113 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8114 /* word4 iocb=rsvd wqe=rsvd */
8115 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8116 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8117 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8118 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8119 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8120 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8121 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8122 LPFC_WQE_LENLOC_WORD3);
8123 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8124 break;
8125 case CMD_FCP_IWRITE64_CR:
8126 command_type = FCP_COMMAND_DATA_OUT;
8127 /* word3 iocb=iotag wqe=payload_offset_len */
8128 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8129 wqe->fcp_iwrite.payload_offset_len =
8130 xmit_len + sizeof(struct fcp_rsp);
8131 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8132 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8133 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8134 iocbq->iocb.ulpFCP2Rcvy);
8135 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8136 /* Always open the exchange */
8137 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
8138 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8139 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8140 LPFC_WQE_LENLOC_WORD4);
8141 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8142 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8143 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8144 break;
8145 case CMD_FCP_IREAD64_CR:
8146 /* word3 iocb=iotag wqe=payload_offset_len */
8147 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8148 wqe->fcp_iread.payload_offset_len =
8149 xmit_len + sizeof(struct fcp_rsp);
8150 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
8151 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8152 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8153 iocbq->iocb.ulpFCP2Rcvy);
8154 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8155 /* Always open the exchange */
8156 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
8157 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8158 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8159 LPFC_WQE_LENLOC_WORD4);
8160 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8161 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8162 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8163 break;
8164 case CMD_FCP_ICMND64_CR:
8165 /* word3 iocb=IO_TAG wqe=reserved */
8166 wqe->fcp_icmd.rsrvd3 = 0;
8167 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8168 /* Always open the exchange */
8169 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8170 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8171 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8172 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8173 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8174 LPFC_WQE_LENLOC_NONE);
8175 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8176 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8177 iocbq->iocb.ulpFCP2Rcvy);
8178 break;
8179 case CMD_GEN_REQUEST64_CR:
8180 /* For this command calculate the xmit length of the
8181 * request bde.
8182 */
8183 xmit_len = 0;
8184 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8185 sizeof(struct ulp_bde64);
8186 for (i = 0; i < numBdes; i++) {
8187 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8188 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8189 break;
8190 xmit_len += bde.tus.f.bdeSize;
8191 }
8192 /* word3 iocb=IO_TAG wqe=request_payload_len */
8193 wqe->gen_req.request_payload_len = xmit_len;
8194 /* word4 iocb=parameter wqe=relative_offset memcpy */
8195 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
8196 /* word6 context tag copied in memcpy */
8197 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8198 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8199 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8200 "2015 Invalid CT %x command 0x%x\n",
8201 ct, iocbq->iocb.ulpCommand);
8202 return IOCB_ERROR;
8203 }
8204 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8205 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8206 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8207 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8208 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8209 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8210 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8211 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8212 command_type = OTHER_COMMAND;
8213 break;
8214 case CMD_XMIT_ELS_RSP64_CX:
8215 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8216 /* words0-2 BDE memcpy */
8217 /* word3 iocb=iotag32 wqe=response_payload_len */
8218 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8219 /* word4 */
8220 wqe->xmit_els_rsp.word4 = 0;
8221 /* word5 iocb=rsvd wqe=did */
8222 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8223 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8224
8225 if_type = bf_get(lpfc_sli_intf_if_type,
8226 &phba->sli4_hba.sli_intf);
8227 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8228 if (iocbq->vport->fc_flag & FC_PT2PT) {
8229 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8230 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8231 iocbq->vport->fc_myDID);
8232 if (iocbq->vport->fc_myDID == Fabric_DID) {
8233 bf_set(wqe_els_did,
8234 &wqe->xmit_els_rsp.wqe_dest, 0);
8235 }
8236 }
8237 }
8238 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8239 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8240 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8241 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8242 iocbq->iocb.unsli3.rcvsli3.ox_id);
8243 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8244 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8245 phba->vpi_ids[iocbq->vport->vpi]);
8246 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8247 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8248 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8249 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8250 LPFC_WQE_LENLOC_WORD3);
8251 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8252 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8253 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8254 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8255 iocbq->context2)->virt);
8256 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8257 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8258 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8259 iocbq->vport->fc_myDID);
8260 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8261 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8262 phba->vpi_ids[phba->pport->vpi]);
8263 }
8264 command_type = OTHER_COMMAND;
8265 break;
8266 case CMD_CLOSE_XRI_CN:
8267 case CMD_ABORT_XRI_CN:
8268 case CMD_ABORT_XRI_CX:
8269 /* words 0-2 memcpy should be 0 (reserved) */
8270 /* port will send abts */
8271 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8272 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8273 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8274 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8275 } else
8276 fip = 0;
8277
8278 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8279 /*
8280 * The link is down, or the command was ELS_FIP
8281 * so the fw does not need to send abts
8282 * on the wire.
8283 */
8284 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8285 else
8286 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8287 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8288 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
8289 wqe->abort_cmd.rsrvd5 = 0;
8290 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8291 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8292 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8293 /*
8294 * The abort handler will send us CMD_ABORT_XRI_CN or
8295 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
8296 */
8297 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8298 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8299 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8300 LPFC_WQE_LENLOC_NONE);
8301 cmnd = CMD_ABORT_XRI_CX;
8302 command_type = OTHER_COMMAND;
8303 xritag = 0;
8304 break;
8305 case CMD_XMIT_BLS_RSP64_CX:
8306 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8307 /* As BLS ABTS RSP WQE is very different from other WQEs,
8308 * we re-construct this WQE here based on information in
8309 * iocbq from scratch.
8310 */
8311 memset(wqe, 0, sizeof(union lpfc_wqe));
8312 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
8313 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8314 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8315 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8316 LPFC_ABTS_UNSOL_INT) {
8317 /* ABTS sent by initiator to CT exchange, the
8318 * RX_ID field will be filled with the newly
8319 * allocated responder XRI.
8320 */
8321 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8322 iocbq->sli4_xritag);
8323 } else {
8324 /* ABTS sent by responder to CT exchange, the
8325 * RX_ID field will be filled with the responder
8326 * RX_ID from ABTS.
8327 */
8328 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8329 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8330 }
8331 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8332 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8333
8334 /* Use CT=VPI */
8335 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8336 ndlp->nlp_DID);
8337 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8338 iocbq->iocb.ulpContext);
8339 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8340 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8341 phba->vpi_ids[phba->pport->vpi]);
8342 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8343 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8344 LPFC_WQE_LENLOC_NONE);
8345 /* Overwrite the pre-set command type with OTHER_COMMAND */
8346 command_type = OTHER_COMMAND;
8347 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8348 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8349 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8350 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8351 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8352 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8353 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8354 }
8355
8356 break;
8357 case CMD_XRI_ABORTED_CX:
8358 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
8359 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
8360 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
8361 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
8362 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
8363 default:
8364 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8365 "2014 Invalid command 0x%x\n",
8366 iocbq->iocb.ulpCommand);
8367 return IOCB_ERROR;
8368 break;
8369 }
8370
8371 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8372 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8373 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8374 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8375 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8376 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8377 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8378 LPFC_IO_DIF_INSERT);
8379 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8380 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8381 wqe->generic.wqe_com.abort_tag = abort_tag;
8382 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8383 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8384 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8385 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8386 return 0;
8387 }
8388
8389 /**
8390 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8391 * @phba: Pointer to HBA context object.
8392 * @ring_number: SLI ring number to issue iocb on.
8393 * @piocb: Pointer to command iocb.
8394 * @flag: Flag indicating if this command can be put into txq.
8395 *
8396 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8397 * an iocb command to an HBA with SLI-4 interface spec.
8398 *
8399 * This function is called with hbalock held. The function will return success
8400 * after it successfully submits the iocb to the firmware or after adding
8401 * it to the txq.
8402 **/
8403 static int
8404 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8405 struct lpfc_iocbq *piocb, uint32_t flag)
8406 {
8407 struct lpfc_sglq *sglq;
8408 union lpfc_wqe wqe;
8409 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8410
8411 if (piocb->sli4_xritag == NO_XRI) {
8412 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8413 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8414 sglq = NULL;
8415 else {
8416 if (!list_empty(&pring->txq)) {
8417 if (!(flag & SLI_IOCB_RET_IOCB)) {
8418 __lpfc_sli_ringtx_put(phba,
8419 pring, piocb);
8420 return IOCB_SUCCESS;
8421 } else {
8422 return IOCB_BUSY;
8423 }
8424 } else {
8425 sglq = __lpfc_sli_get_sglq(phba, piocb);
8426 if (!sglq) {
8427 if (!(flag & SLI_IOCB_RET_IOCB)) {
8428 __lpfc_sli_ringtx_put(phba,
8429 pring,
8430 piocb);
8431 return IOCB_SUCCESS;
8432 } else
8433 return IOCB_BUSY;
8434 }
8435 }
8436 }
8437 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
8438 /* These IOs already have an XRI and a mapped sgl. */
8439 sglq = NULL;
8440 } else {
8441 /*
8442 * This is a continuation of a command (CX), so this
8443 * sglq is on the active list
8444 */
8445 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8446 if (!sglq)
8447 return IOCB_ERROR;
8448 }
8449
8450 if (sglq) {
8451 piocb->sli4_lxritag = sglq->sli4_lxritag;
8452 piocb->sli4_xritag = sglq->sli4_xritag;
8453 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8454 return IOCB_ERROR;
8455 }
8456
8457 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8458 return IOCB_ERROR;
8459
8460 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8461 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8462 if (unlikely(!phba->sli4_hba.fcp_wq))
8463 return IOCB_ERROR;
8464 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8465 &wqe))
8466 return IOCB_ERROR;
8467 } else {
8468 if (unlikely(!phba->sli4_hba.els_wq))
8469 return IOCB_ERROR;
8470 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8471 return IOCB_ERROR;
8472 }
8473 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8474
8475 return 0;
8476 }
8477
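/*
 * Illustrative sketch (not part of the driver, kept out of the build): how a
 * caller's choice of the SLI_IOCB_RET_IOCB flag changes the behaviour of the
 * issue path above.  Without the flag an iocb that cannot get an sglq is
 * parked on the txq and IOCB_SUCCESS is returned; with the flag the caller
 * gets IOCB_BUSY back and keeps ownership.  The helper name is hypothetical.
 */
#if 0
static int lpfc_example_try_issue(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	int rc;

	/* Ask for IOCB_BUSY instead of silent queueing to the txq. */
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY) {
		/* Caller still owns piocb and may retry later or release it. */
		lpfc_sli_release_iocbq(phba, piocb);
		return -EBUSY;
	}
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif
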
8478 /**
8479 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8480 *
8481 * This routine wraps the lockless IOCB issue routine by calling the
8482 * function pointer stored in the lpfc_hba struct.
8483 *
8484 * Return codes:
8485 * IOCB_ERROR - Error
8486 * IOCB_SUCCESS - Success
8487 * IOCB_BUSY - Busy
8488 **/
8489 int
8490 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8491 struct lpfc_iocbq *piocb, uint32_t flag)
8492 {
8493 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8494 }
8495
8496 /**
8497 * lpfc_sli_api_table_setup - Set up sli api function jump table
8498 * @phba: The hba struct for which this call is being executed.
8499 * @dev_grp: The HBA PCI-Device group number.
8500 *
8501 * This routine sets up the SLI interface API function jump table in @phba
8502 * struct.
8503 * Returns: 0 - success, -ENODEV - failure.
8504 **/
8505 int
8506 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8507 {
8508
8509 switch (dev_grp) {
8510 case LPFC_PCI_DEV_LP:
8511 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8512 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8513 break;
8514 case LPFC_PCI_DEV_OC:
8515 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8516 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8517 break;
8518 default:
8519 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8520 "1419 Invalid HBA PCI-device group: 0x%x\n",
8521 dev_grp);
8522 return -ENODEV;
8523 break;
8524 }
8525 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8526 return 0;
8527 }
8528
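/*
 * Illustrative sketch (not part of the driver, kept out of the build): the
 * jump table bound above keeps common code SLI-revision agnostic; the table
 * is bound once per HBA and every later issue dispatches through the stored
 * function pointer.  The helper name is hypothetical, and a real caller of
 * __lpfc_sli_issue_iocb must already hold the ring lock or hbalock.
 */
#if 0
static int lpfc_example_bind_and_issue(struct lpfc_hba *phba, uint8_t dev_grp,
				       struct lpfc_iocbq *piocb)
{
	if (lpfc_sli_api_table_setup(phba, dev_grp))
		return -ENODEV;

	/* Dispatches to __lpfc_sli_issue_iocb_s3 or _s4 as bound above. */
	return __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
}
#endif
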
8529 /**
8530 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8531 * @phba: Pointer to HBA context object.
8532 * @ring_number: SLI ring number to issue iocb on.
8533 * @piocb: Pointer to command iocb.
8534 * @flag: Flag indicating if this command can be put into txq.
8535 *
8536 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8537 * function. This function takes the appropriate lock (the ring_lock for
8538 * SLI4 or the hbalock for SLI3), calls __lpfc_sli_issue_iocb and returns
8539 * the error code returned by __lpfc_sli_issue_iocb. This wrapper is used
8540 * by functions which do not already hold the lock.
8541 **/
8542 int
8543 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8544 struct lpfc_iocbq *piocb, uint32_t flag)
8545 {
8546 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
8547 struct lpfc_sli_ring *pring;
8548 struct lpfc_queue *fpeq;
8549 struct lpfc_eqe *eqe;
8550 unsigned long iflags;
8551 int rc, idx;
8552
8553 if (phba->sli_rev == LPFC_SLI_REV4) {
8554 if (piocb->iocb_flag & LPFC_IO_FCP) {
8555 if (unlikely(!phba->sli4_hba.fcp_wq))
8556 return IOCB_ERROR;
8557 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8558 piocb->fcp_wqidx = idx;
8559 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8560
8561 pring = &phba->sli.ring[ring_number];
8562 spin_lock_irqsave(&pring->ring_lock, iflags);
8563 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8564 flag);
8565 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8566
8567 if (lpfc_fcp_look_ahead) {
8568 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8569
8570 if (atomic_dec_and_test(&fcp_eq_hdl->
8571 fcp_eq_in_use)) {
8572
8573 /* Get associated EQ with this index */
8574 fpeq = phba->sli4_hba.hba_eq[idx];
8575
8576 /* Turn off interrupts from this EQ */
8577 lpfc_sli4_eq_clr_intr(fpeq);
8578
8579 /*
8580 * Process all the events on FCP EQ
8581 */
8582 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8583 lpfc_sli4_hba_handle_eqe(phba,
8584 eqe, idx);
8585 fpeq->EQ_processed++;
8586 }
8587
8588 /* Always clear and re-arm the EQ */
8589 lpfc_sli4_eq_release(fpeq,
8590 LPFC_QUEUE_REARM);
8591 }
8592 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8593 }
8594 } else {
8595 pring = &phba->sli.ring[ring_number];
8596 spin_lock_irqsave(&pring->ring_lock, iflags);
8597 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8598 flag);
8599 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8600
8601 }
8602 } else {
8603 /* For now, SLI2/3 will still use hbalock */
8604 spin_lock_irqsave(&phba->hbalock, iflags);
8605 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8606 spin_unlock_irqrestore(&phba->hbalock, iflags);
8607 }
8608 return rc;
8609 }
8610
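/*
 * Illustrative sketch (not part of the driver, kept out of the build): the
 * fcp_eq_in_use handling above is a simple "only one context polls the EQ at
 * a time" guard.  The counter idles at 1; the context that decrements it to
 * zero owns the poll, and everyone restores the count afterwards.  The helper
 * name is hypothetical.
 */
#if 0
static void lpfc_example_eq_poll_guard(atomic_t *eq_in_use)
{
	if (atomic_dec_and_test(eq_in_use)) {
		/* Sole owner: safe to poll, clear and re-arm the EQ here. */
	}
	/* Always restore the count for the next candidate. */
	atomic_inc(eq_in_use);
}
#endif
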
8611 /**
8612 * lpfc_extra_ring_setup - Extra ring setup function
8613 * @phba: Pointer to HBA context object.
8614 *
8615 * This function is called while the driver attaches to the
8616 * HBA to set up the extra ring. The extra ring is used
8617 * only when the driver needs to support target mode
8618 * or IP over FC functionality.
8619 *
8620 * This function is called with no lock held.
8621 **/
8622 static int
8623 lpfc_extra_ring_setup(struct lpfc_hba *phba)
8624 {
8625 struct lpfc_sli *psli;
8626 struct lpfc_sli_ring *pring;
8627
8628 psli = &phba->sli;
8629
8630 /* Adjust cmd/rsp ring iocb entries more evenly */
8631
8632 /* Take some away from the FCP ring */
8633 pring = &psli->ring[psli->fcp_ring];
8634 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8635 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8636 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8637 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8638
8639 /* and give them to the extra ring */
8640 pring = &psli->ring[psli->extra_ring];
8641
8642 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8643 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8644 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8645 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8646
8647 /* Setup default profile for this ring */
8648 pring->iotag_max = 4096;
8649 pring->num_mask = 1;
8650 pring->prt[0].profile = 0; /* Mask 0 */
8651 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8652 pring->prt[0].type = phba->cfg_multi_ring_type;
8653 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8654 return 0;
8655 }
8656
8657 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8658 * @phba: Pointer to HBA context object.
8659 * @iocbq: Pointer to iocb object.
8660 *
8661 * The async_event handler calls this routine when it receives
8662 * an ASYNC_STATUS_CN event from the port. The port generates
8663 * this event when an Abort Sequence request to an rport fails
8664 * twice in succession. The abort could be originated by the
8665 * driver or by the port. The ABTS could have been for an ELS
8666 * or FCP IO. The port only generates this event when an ABTS
8667 * fails to complete after one retry.
8668 */
8669 static void
8670 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8671 struct lpfc_iocbq *iocbq)
8672 {
8673 struct lpfc_nodelist *ndlp = NULL;
8674 uint16_t rpi = 0, vpi = 0;
8675 struct lpfc_vport *vport = NULL;
8676
8677 /* The rpi in the ulpContext is vport-sensitive. */
8678 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8679 rpi = iocbq->iocb.ulpContext;
8680
8681 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8682 "3092 Port generated ABTS async event "
8683 "on vpi %d rpi %d status 0x%x\n",
8684 vpi, rpi, iocbq->iocb.ulpStatus);
8685
8686 vport = lpfc_find_vport_by_vpid(phba, vpi);
8687 if (!vport)
8688 goto err_exit;
8689 ndlp = lpfc_findnode_rpi(vport, rpi);
8690 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8691 goto err_exit;
8692
8693 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
8694 lpfc_sli_abts_recover_port(vport, ndlp);
8695 return;
8696
8697 err_exit:
8698 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8699 "3095 Event Context not found, no "
8700 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
8701 vpi, rpi, iocbq->iocb.ulpStatus,
8702 iocbq->iocb.un.ulpWord[4]);
8703 }
8704
8705 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
8706 * @phba: pointer to HBA context object.
8707 * @ndlp: nodelist pointer for the impacted rport.
8708 * @axri: pointer to the wcqe containing the failed exchange.
8709 *
8710 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
8711 * port. The port generates this event when an abort exchange request to an
8712 * rport fails twice in succession with no reply. The abort could be originated
8713 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
8714 */
8715 void
8716 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8717 struct lpfc_nodelist *ndlp,
8718 struct sli4_wcqe_xri_aborted *axri)
8719 {
8720 struct lpfc_vport *vport;
8721 uint32_t ext_status = 0;
8722
8723 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
8724 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8725 "3115 Node Context not found, driver "
8726 "ignoring abts err event\n");
8727 return;
8728 }
8729
8730 vport = ndlp->vport;
8731 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8732 "3116 Port generated FCP XRI ABORT event on "
8733 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8734 ndlp->vport->vpi, ndlp->nlp_rpi,
8735 bf_get(lpfc_wcqe_xa_xri, axri),
8736 bf_get(lpfc_wcqe_xa_status, axri),
8737 axri->parameter);
8738
8739 /*
8740 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8741 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8742 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8743 */
8744 ext_status = axri->parameter & IOERR_PARAM_MASK;
8745 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8746 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8747 lpfc_sli_abts_recover_port(vport, ndlp);
8748 }
8749
8750 /**
8751 * lpfc_sli_async_event_handler - ASYNC iocb handler function
8752 * @phba: Pointer to HBA context object.
8753 * @pring: Pointer to driver SLI ring object.
8754 * @iocbq: Pointer to iocb object.
8755 *
8756 * This function is called by the slow ring event handler
8757 * function when there is an ASYNC event iocb in the ring.
8758 * This function is called with no lock held.
8759 * Currently this function handles only temperature related
8760 * ASYNC events. The function decodes the temperature sensor
8761 * event message and posts events for the management applications.
8762 **/
8763 static void
8764 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8765 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8766 {
8767 IOCB_t *icmd;
8768 uint16_t evt_code;
8769 struct temp_event temp_event_data;
8770 struct Scsi_Host *shost;
8771 uint32_t *iocb_w;
8772
8773 icmd = &iocbq->iocb;
8774 evt_code = icmd->un.asyncstat.evt_code;
8775
8776 switch (evt_code) {
8777 case ASYNC_TEMP_WARN:
8778 case ASYNC_TEMP_SAFE:
8779 temp_event_data.data = (uint32_t) icmd->ulpContext;
8780 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8781 if (evt_code == ASYNC_TEMP_WARN) {
8782 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8783 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8784 "0347 Adapter is very hot, please take "
8785 "corrective action. temperature : %d Celsius\n",
8786 (uint32_t) icmd->ulpContext);
8787 } else {
8788 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8789 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
8790 "0340 Adapter temperature is OK now. "
8791 "temperature : %d Celsius\n",
8792 (uint32_t) icmd->ulpContext);
8793 }
8794
8795 /* Send temperature change event to applications */
8796 shost = lpfc_shost_from_vport(phba->pport);
8797 fc_host_post_vendor_event(shost, fc_get_event_number(),
8798 sizeof(temp_event_data), (char *) &temp_event_data,
8799 LPFC_NL_VENDOR_ID);
8800 break;
8801 case ASYNC_STATUS_CN:
8802 lpfc_sli_abts_err_handler(phba, iocbq);
8803 break;
8804 default:
8805 iocb_w = (uint32_t *) icmd;
8806 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8807 "0346 Ring %d handler: unexpected ASYNC_STATUS"
8808 " evt_code 0x%x\n"
8809 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8810 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8811 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8812 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
8813 pring->ringno, icmd->un.asyncstat.evt_code,
8814 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8815 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8816 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8817 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8818
8819 break;
8820 }
8821 }
8822
8823
8824 /**
8825 * lpfc_sli_setup - SLI ring setup function
8826 * @phba: Pointer to HBA context object.
8827 *
8828 * lpfc_sli_setup sets up rings of the SLI interface with
8829 * number of iocbs per ring and iotags. This function is
8830 * called while the driver attaches to the HBA and before the
8831 * interrupts are enabled. So there is no need for locking.
8832 *
8833 * This function always returns 0.
8834 **/
8835 int
8836 lpfc_sli_setup(struct lpfc_hba *phba)
8837 {
8838 int i, totiocbsize = 0;
8839 struct lpfc_sli *psli = &phba->sli;
8840 struct lpfc_sli_ring *pring;
8841
8842 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8843 if (phba->sli_rev == LPFC_SLI_REV4)
8844 psli->num_rings += phba->cfg_fcp_io_channel;
8845 psli->sli_flag = 0;
8846 psli->fcp_ring = LPFC_FCP_RING;
8847 psli->next_ring = LPFC_FCP_NEXT_RING;
8848 psli->extra_ring = LPFC_EXTRA_RING;
8849
8850 psli->iocbq_lookup = NULL;
8851 psli->iocbq_lookup_len = 0;
8852 psli->last_iotag = 0;
8853
8854 for (i = 0; i < psli->num_rings; i++) {
8855 pring = &psli->ring[i];
8856 switch (i) {
8857 case LPFC_FCP_RING: /* ring 0 - FCP */
8858 /* numCiocb and numRiocb are used in config_port */
8859 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8860 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8861 pring->sli.sli3.numCiocb +=
8862 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8863 pring->sli.sli3.numRiocb +=
8864 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8865 pring->sli.sli3.numCiocb +=
8866 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8867 pring->sli.sli3.numRiocb +=
8868 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8869 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8870 SLI3_IOCB_CMD_SIZE :
8871 SLI2_IOCB_CMD_SIZE;
8872 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8873 SLI3_IOCB_RSP_SIZE :
8874 SLI2_IOCB_RSP_SIZE;
8875 pring->iotag_ctr = 0;
8876 pring->iotag_max =
8877 (phba->cfg_hba_queue_depth * 2);
8878 pring->fast_iotag = pring->iotag_max;
8879 pring->num_mask = 0;
8880 break;
8881 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
8882 /* numCiocb and numRiocb are used in config_port */
8883 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8884 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8885 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8886 SLI3_IOCB_CMD_SIZE :
8887 SLI2_IOCB_CMD_SIZE;
8888 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8889 SLI3_IOCB_RSP_SIZE :
8890 SLI2_IOCB_RSP_SIZE;
8891 pring->iotag_max = phba->cfg_hba_queue_depth;
8892 pring->num_mask = 0;
8893 break;
8894 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8895 /* numCiocb and numRiocb are used in config_port */
8896 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8897 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8898 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8899 SLI3_IOCB_CMD_SIZE :
8900 SLI2_IOCB_CMD_SIZE;
8901 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8902 SLI3_IOCB_RSP_SIZE :
8903 SLI2_IOCB_RSP_SIZE;
8904 pring->fast_iotag = 0;
8905 pring->iotag_ctr = 0;
8906 pring->iotag_max = 4096;
8907 pring->lpfc_sli_rcv_async_status =
8908 lpfc_sli_async_event_handler;
8909 pring->num_mask = LPFC_MAX_RING_MASK;
8910 pring->prt[0].profile = 0; /* Mask 0 */
8911 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8912 pring->prt[0].type = FC_TYPE_ELS;
8913 pring->prt[0].lpfc_sli_rcv_unsol_event =
8914 lpfc_els_unsol_event;
8915 pring->prt[1].profile = 0; /* Mask 1 */
8916 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8917 pring->prt[1].type = FC_TYPE_ELS;
8918 pring->prt[1].lpfc_sli_rcv_unsol_event =
8919 lpfc_els_unsol_event;
8920 pring->prt[2].profile = 0; /* Mask 2 */
8921 /* NameServer Inquiry */
8922 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
8923 /* NameServer */
8924 pring->prt[2].type = FC_TYPE_CT;
8925 pring->prt[2].lpfc_sli_rcv_unsol_event =
8926 lpfc_ct_unsol_event;
8927 pring->prt[3].profile = 0; /* Mask 3 */
8928 /* NameServer response */
8929 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
8930 /* NameServer */
8931 pring->prt[3].type = FC_TYPE_CT;
8932 pring->prt[3].lpfc_sli_rcv_unsol_event =
8933 lpfc_ct_unsol_event;
8934 break;
8935 }
8936 totiocbsize += (pring->sli.sli3.numCiocb *
8937 pring->sli.sli3.sizeCiocb) +
8938 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
8939 }
8940 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8941 /* Too many cmd / rsp ring entries in SLI2 SLIM */
8942 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8943 "SLI2 SLIM Data: x%x x%lx\n",
8944 phba->brd_no, totiocbsize,
8945 (unsigned long) MAX_SLIM_IOCB_SIZE);
8946 }
8947 if (phba->cfg_multi_ring_support == 2)
8948 lpfc_extra_ring_setup(phba);
8949
8950 return 0;
8951 }
8952
8953 /**
8954 * lpfc_sli_queue_setup - Queue initialization function
8955 * @phba: Pointer to HBA context object.
8956 *
8957 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8958 * ring. This function also initializes ring indices of each ring.
8959 * This function is called during the initialization of the SLI
8960 * interface of an HBA.
8961 * This function is called with no lock held and always returns
8962 * 1.
8963 **/
8964 int
8965 lpfc_sli_queue_setup(struct lpfc_hba *phba)
8966 {
8967 struct lpfc_sli *psli;
8968 struct lpfc_sli_ring *pring;
8969 int i;
8970
8971 psli = &phba->sli;
8972 spin_lock_irq(&phba->hbalock);
8973 INIT_LIST_HEAD(&psli->mboxq);
8974 INIT_LIST_HEAD(&psli->mboxq_cmpl);
8975 /* Initialize list headers for txq and txcmplq as double linked lists */
8976 for (i = 0; i < psli->num_rings; i++) {
8977 pring = &psli->ring[i];
8978 pring->ringno = i;
8979 pring->sli.sli3.next_cmdidx = 0;
8980 pring->sli.sli3.local_getidx = 0;
8981 pring->sli.sli3.cmdidx = 0;
8982 INIT_LIST_HEAD(&pring->txq);
8983 INIT_LIST_HEAD(&pring->txcmplq);
8984 INIT_LIST_HEAD(&pring->iocb_continueq);
8985 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8986 INIT_LIST_HEAD(&pring->postbufq);
8987 spin_lock_init(&pring->ring_lock);
8988 }
8989 spin_unlock_irq(&phba->hbalock);
8990 return 1;
8991 }
8992
8993 /**
8994 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8995 * @phba: Pointer to HBA context object.
8996 *
8997 * This routine flushes the mailbox command subsystem. It will unconditionally
8998 * flush all the mailbox commands in the three possible stages in the mailbox
8999 * command sub-system: pending mailbox command queue; the outstanding mailbox
9000 * command; and completed mailbox command queue. It is the caller's responsibility
9001 * to make sure that the driver is in the proper state to flush the mailbox
9002 * command sub-system. Namely, the posting of mailbox commands into the
9003 * pending mailbox command queue from the various clients must be stopped;
9004 * either the HBA is in a state that it will never work on the outstanding
9005 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9006 * mailbox command has been completed.
9007 **/
9008 static void
9009 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9010 {
9011 LIST_HEAD(completions);
9012 struct lpfc_sli *psli = &phba->sli;
9013 LPFC_MBOXQ_t *pmb;
9014 unsigned long iflag;
9015
9016 /* Flush all the mailbox commands in the mbox system */
9017 spin_lock_irqsave(&phba->hbalock, iflag);
9018 /* The pending mailbox command queue */
9019 list_splice_init(&phba->sli.mboxq, &completions);
9020 /* The outstanding active mailbox command */
9021 if (psli->mbox_active) {
9022 list_add_tail(&psli->mbox_active->list, &completions);
9023 psli->mbox_active = NULL;
9024 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9025 }
9026 /* The completed mailbox command queue */
9027 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9028 spin_unlock_irqrestore(&phba->hbalock, iflag);
9029
9030 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
9031 while (!list_empty(&completions)) {
9032 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9033 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9034 if (pmb->mbox_cmpl)
9035 pmb->mbox_cmpl(phba, pmb);
9036 }
9037 }
9038
9039 /**
9040 * lpfc_sli_host_down - Vport cleanup function
9041 * @vport: Pointer to virtual port object.
9042 *
9043 * lpfc_sli_host_down is called to clean up the resources
9044 * associated with a vport before destroying virtual
9045 * port data structures.
9046 * This function does the following operations:
9047 * - Free discovery resources associated with this virtual
9048 * port.
9049 * - Free iocbs associated with this virtual port in
9050 * the txq.
9051 * - Send abort for all iocb commands associated with this
9052 * vport in txcmplq.
9053 *
9054 * This function is called with no lock held and always returns 1.
9055 **/
9056 int
9057 lpfc_sli_host_down(struct lpfc_vport *vport)
9058 {
9059 LIST_HEAD(completions);
9060 struct lpfc_hba *phba = vport->phba;
9061 struct lpfc_sli *psli = &phba->sli;
9062 struct lpfc_sli_ring *pring;
9063 struct lpfc_iocbq *iocb, *next_iocb;
9064 int i;
9065 unsigned long flags = 0;
9066 uint16_t prev_pring_flag;
9067
9068 lpfc_cleanup_discovery_resources(vport);
9069
9070 spin_lock_irqsave(&phba->hbalock, flags);
9071 for (i = 0; i < psli->num_rings; i++) {
9072 pring = &psli->ring[i];
9073 prev_pring_flag = pring->flag;
9074 /* Only slow rings */
9075 if (pring->ringno == LPFC_ELS_RING) {
9076 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9077 /* Set the lpfc data pending flag */
9078 set_bit(LPFC_DATA_READY, &phba->data_flags);
9079 }
9080 /*
9081 * Error everything on the txq since these iocbs have not been
9082 * given to the FW yet.
9083 */
9084 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9085 if (iocb->vport != vport)
9086 continue;
9087 list_move_tail(&iocb->list, &completions);
9088 }
9089
9090 /* Next issue ABTS for everything on the txcmplq */
9091 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9092 list) {
9093 if (iocb->vport != vport)
9094 continue;
9095 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9096 }
9097
9098 pring->flag = prev_pring_flag;
9099 }
9100
9101 spin_unlock_irqrestore(&phba->hbalock, flags);
9102
9103 /* Cancel all the IOCBs from the completions list */
9104 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9105 IOERR_SLI_DOWN);
9106 return 1;
9107 }
9108
9109 /**
9110 * lpfc_sli_hba_down - Resource cleanup function for the HBA
9111 * @phba: Pointer to HBA context object.
9112 *
9113 * This function cleans up all iocbs, buffers, and mailbox commands
9114 * while shutting down the HBA. This function is called with no
9115 * lock held and always returns 1.
9116 * This function does the following to clean up driver resources:
9117 * - Free discovery resources for each virtual port
9118 * - Cleanup any pending fabric iocbs
9119 * - Iterate through the iocb txq and free each entry
9120 * in the list.
9121 * - Free up any buffer posted to the HBA
9122 * - Free mailbox commands in the mailbox queue.
9123 **/
9124 int
9125 lpfc_sli_hba_down(struct lpfc_hba *phba)
9126 {
9127 LIST_HEAD(completions);
9128 struct lpfc_sli *psli = &phba->sli;
9129 struct lpfc_sli_ring *pring;
9130 struct lpfc_dmabuf *buf_ptr;
9131 unsigned long flags = 0;
9132 int i;
9133
9134 /* Shutdown the mailbox command sub-system */
9135 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9136
9137 lpfc_hba_down_prep(phba);
9138
9139 lpfc_fabric_abort_hba(phba);
9140
9141 spin_lock_irqsave(&phba->hbalock, flags);
9142 for (i = 0; i < psli->num_rings; i++) {
9143 pring = &psli->ring[i];
9144 /* Only slow rings */
9145 if (pring->ringno == LPFC_ELS_RING) {
9146 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9147 /* Set the lpfc data pending flag */
9148 set_bit(LPFC_DATA_READY, &phba->data_flags);
9149 }
9150
9151 /*
9152 * Error everything on the txq since these iocbs have not been
9153 * given to the FW yet.
9154 */
9155 list_splice_init(&pring->txq, &completions);
9156 }
9157 spin_unlock_irqrestore(&phba->hbalock, flags);
9158
9159 /* Cancel all the IOCBs from the completions list */
9160 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9161 IOERR_SLI_DOWN);
9162
9163 spin_lock_irqsave(&phba->hbalock, flags);
9164 list_splice_init(&phba->elsbuf, &completions);
9165 phba->elsbuf_cnt = 0;
9166 phba->elsbuf_prev_cnt = 0;
9167 spin_unlock_irqrestore(&phba->hbalock, flags);
9168
9169 while (!list_empty(&completions)) {
9170 list_remove_head(&completions, buf_ptr,
9171 struct lpfc_dmabuf, list);
9172 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9173 kfree(buf_ptr);
9174 }
9175
9176 /* Return any active mbox cmds */
9177 del_timer_sync(&psli->mbox_tmo);
9178
9179 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9180 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9181 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9182
9183 return 1;
9184 }
9185
9186 /**
9187 * lpfc_sli_pcimem_bcopy - SLI memory copy function
9188 * @srcp: Source memory pointer.
9189 * @destp: Destination memory pointer.
9190 * @cnt: Number of bytes to copy (a multiple of 4).
9191 *
9192 * This function is used for copying data between driver memory
9193 * and the SLI memory. This function also changes the endianness
9194 * of each word if native endianness is different from SLI
9195 * endianness. This function can be called with or without
9196 * lock.
9197 **/
9198 void
9199 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9200 {
9201 uint32_t *src = srcp;
9202 uint32_t *dest = destp;
9203 uint32_t ldata;
9204 int i;
9205
9206 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9207 ldata = *src;
9208 ldata = le32_to_cpu(ldata);
9209 *dest = ldata;
9210 src++;
9211 dest++;
9212 }
9213 }
9214
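/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * lpfc_sli_pcimem_bcopy() walks the source a 32-bit word at a time and
 * byte-swaps on big-endian hosts, so @cnt is a byte count that should be a
 * multiple of 4.  The destination array size below is hypothetical.
 */
#if 0
static void lpfc_example_copy_le_words(void *slim_src, uint32_t *hdr_words)
{
	/* Copy four little-endian words (16 bytes) into CPU byte order. */
	lpfc_sli_pcimem_bcopy(slim_src, hdr_words, 4 * sizeof(uint32_t));
}
#endif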
9215
9216 /**
9217 * lpfc_sli_bemem_bcopy - SLI memory copy function
9218 * @srcp: Source memory pointer.
9219 * @destp: Destination memory pointer.
9220 * @cnt: Number of bytes to copy (a multiple of 4).
9221 *
9222 * This function is used for copying data between a data structure
9223 * with big endian representation to local endianness.
9224 * This function can be called with or without lock.
9225 **/
9226 void
9227 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9228 {
9229 uint32_t *src = srcp;
9230 uint32_t *dest = destp;
9231 uint32_t ldata;
9232 int i;
9233
9234 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9235 ldata = *src;
9236 ldata = be32_to_cpu(ldata);
9237 *dest = ldata;
9238 src++;
9239 dest++;
9240 }
9241 }
9242
9243 /**
9244 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
9245 * @phba: Pointer to HBA context object.
9246 * @pring: Pointer to driver SLI ring object.
9247 * @mp: Pointer to driver buffer object.
9248 *
9249 * This function is called with no lock held.
9250 * It always returns zero after adding the buffer to the postbufq
9251 * buffer list.
9252 **/
9253 int
9254 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9255 struct lpfc_dmabuf *mp)
9256 {
9257 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
9258 later */
9259 spin_lock_irq(&phba->hbalock);
9260 list_add_tail(&mp->list, &pring->postbufq);
9261 pring->postbufq_cnt++;
9262 spin_unlock_irq(&phba->hbalock);
9263 return 0;
9264 }
9265
9266 /**
9267 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
9268 * @phba: Pointer to HBA context object.
9269 *
9270 * When HBQ is enabled, buffers are searched based on tags. This function
9271 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
9272 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
9273 * does not conflict with tags of buffer posted for unsolicited events.
9274 * The function returns the allocated tag. The function is called with
9275 * no locks held.
9276 **/
9277 uint32_t
9278 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9279 {
9280 spin_lock_irq(&phba->hbalock);
9281 phba->buffer_tag_count++;
9282 /*
9283 * Always set the QUE_BUFTAG_BIT to distinguish this tag
9284 * from a tag assigned by HBQ.
9285 */
9286 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9287 spin_unlock_irq(&phba->hbalock);
9288 return phba->buffer_tag_count;
9289 }
9290
9291 /**
9292 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
9293 * @phba: Pointer to HBA context object.
9294 * @pring: Pointer to driver SLI ring object.
9295 * @tag: Buffer tag.
9296 *
9297 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
9298 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
9299 * iocb is posted to the response ring with the tag of the buffer.
9300 * This function searches the pring->postbufq list using the tag
9301 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
9302 * iocb. If the buffer is found then lpfc_dmabuf object of the
9303 * buffer is returned to the caller else NULL is returned.
9304 * This function is called with no lock held.
9305 **/
9306 struct lpfc_dmabuf *
9307 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9308 uint32_t tag)
9309 {
9310 struct lpfc_dmabuf *mp, *next_mp;
9311 struct list_head *slp = &pring->postbufq;
9312
9313 /* Search postbufq, from the beginning, looking for a match on tag */
9314 spin_lock_irq(&phba->hbalock);
9315 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9316 if (mp->buffer_tag == tag) {
9317 list_del_init(&mp->list);
9318 pring->postbufq_cnt--;
9319 spin_unlock_irq(&phba->hbalock);
9320 return mp;
9321 }
9322 }
9323
9324 spin_unlock_irq(&phba->hbalock);
9325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9326 "0402 Cannot find virtual addr for buffer tag on "
9327 "ring %d Data x%lx x%p x%p x%x\n",
9328 pring->ringno, (unsigned long) tag,
9329 slp->next, slp->prev, pring->postbufq_cnt);
9330
9331 return NULL;
9332 }
9333
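/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * typical life cycle of a driver-tagged buffer.  A tag is reserved with
 * lpfc_sli_get_buffer_tag(), the buffer is parked on the ring's postbufq,
 * and when the matching CMD_IOCB_RET_XRI64_CX response arrives the buffer is
 * reclaimed by tag.  Allocation of the lpfc_dmabuf is omitted and the helper
 * name is hypothetical.
 */
#if 0
static struct lpfc_dmabuf *lpfc_example_tagged_buf(struct lpfc_hba *phba,
						   struct lpfc_sli_ring *pring,
						   struct lpfc_dmabuf *mp)
{
	uint32_t tag = lpfc_sli_get_buffer_tag(phba);

	mp->buffer_tag = tag;
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* ... later, in the response handling path ... */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
}
#endif
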
9334 /**
9335 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
9336 * @phba: Pointer to HBA context object.
9337 * @pring: Pointer to driver SLI ring object.
9338 * @phys: DMA address of the buffer.
9339 *
9340 * This function searches the buffer list using the dma_address
9341 * of unsolicited event to find the driver's lpfc_dmabuf object
9342 * corresponding to the dma_address. The function returns the
9343 * lpfc_dmabuf object if a buffer is found else it returns NULL.
9344 * This function is called by the ct and els unsolicited event
9345 * handlers to get the buffer associated with the unsolicited
9346 * event.
9347 *
9348 * This function is called with no lock held.
9349 **/
9350 struct lpfc_dmabuf *
9351 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9352 dma_addr_t phys)
9353 {
9354 struct lpfc_dmabuf *mp, *next_mp;
9355 struct list_head *slp = &pring->postbufq;
9356
9357 /* Search postbufq, from the beginning, looking for a match on phys */
9358 spin_lock_irq(&phba->hbalock);
9359 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9360 if (mp->phys == phys) {
9361 list_del_init(&mp->list);
9362 pring->postbufq_cnt--;
9363 spin_unlock_irq(&phba->hbalock);
9364 return mp;
9365 }
9366 }
9367
9368 spin_unlock_irq(&phba->hbalock);
9369 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9370 "0410 Cannot find virtual addr for mapped buf on "
9371 "ring %d Data x%llx x%p x%p x%x\n",
9372 pring->ringno, (unsigned long long)phys,
9373 slp->next, slp->prev, pring->postbufq_cnt);
9374 return NULL;
9375 }
9376
9377 /**
9378 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
9379 * @phba: Pointer to HBA context object.
9380 * @cmdiocb: Pointer to driver command iocb object.
9381 * @rspiocb: Pointer to driver response iocb object.
9382 *
9383 * This function is the completion handler for the abort iocbs for
9384 * ELS commands. This function is called from the ELS ring event
9385 * handler with no lock held. This function frees memory resources
9386 * associated with the abort iocb.
9387 **/
9388 static void
9389 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9390 struct lpfc_iocbq *rspiocb)
9391 {
9392 IOCB_t *irsp = &rspiocb->iocb;
9393 uint16_t abort_iotag, abort_context;
9394 struct lpfc_iocbq *abort_iocb = NULL;
9395
9396 if (irsp->ulpStatus) {
9397
9398 /*
9399 * Assume that the port already completed and returned, or
9400 * will return the iocb. Just Log the message.
9401 */
9402 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9403 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9404
9405 spin_lock_irq(&phba->hbalock);
9406 if (phba->sli_rev < LPFC_SLI_REV4) {
9407 if (abort_iotag != 0 &&
9408 abort_iotag <= phba->sli.last_iotag)
9409 abort_iocb =
9410 phba->sli.iocbq_lookup[abort_iotag];
9411 } else
9412 /* For sli4 the abort_tag is the XRI,
9413 * so the abort routine puts the iotag of the iocb
9414 * being aborted in the context field of the abort
9415 * IOCB.
9416 */
9417 abort_iocb = phba->sli.iocbq_lookup[abort_context];
9418
9419 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9420 "0327 Cannot abort els iocb %p "
9421 "with tag %x context %x, abort status %x, "
9422 "abort code %x\n",
9423 abort_iocb, abort_iotag, abort_context,
9424 irsp->ulpStatus, irsp->un.ulpWord[4]);
9425
9426 spin_unlock_irq(&phba->hbalock);
9427 }
9428 lpfc_sli_release_iocbq(phba, cmdiocb);
9429 return;
9430 }
9431
9432 /**
9433 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
9434 * @phba: Pointer to HBA context object.
9435 * @cmdiocb: Pointer to driver command iocb object.
9436 * @rspiocb: Pointer to driver response iocb object.
9437 *
9438 * The function is called from SLI ring event handler with no
9439 * lock held. This function is the completion handler for ELS commands
9440 * which are aborted. The function frees memory resources used for
9441 * the aborted ELS commands.
9442 **/
9443 static void
9444 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9445 struct lpfc_iocbq *rspiocb)
9446 {
9447 IOCB_t *irsp = &rspiocb->iocb;
9448
9449 /* ELS cmd tag <ulpIoTag> completes */
9450 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9451 "0139 Ignoring ELS cmd tag x%x completion Data: "
9452 "x%x x%x x%x\n",
9453 irsp->ulpIoTag, irsp->ulpStatus,
9454 irsp->un.ulpWord[4], irsp->ulpTimeout);
9455 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9456 lpfc_ct_free_iocb(phba, cmdiocb);
9457 else
9458 lpfc_els_free_iocb(phba, cmdiocb);
9459 return;
9460 }
9461
9462 /**
9463 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
9464 * @phba: Pointer to HBA context object.
9465 * @pring: Pointer to driver SLI ring object.
9466 * @cmdiocb: Pointer to driver command iocb object.
9467 *
9468 * This function issues an abort iocb for the provided command iocb down to
9469 * the port. Other than the case the outstanding command iocb is an abort
9470 * request, this function issues abort out unconditionally. This function is
9471 * called with hbalock held. The function returns 0 when it fails due to
9472 * memory allocation failure or when the command iocb is an abort request.
9473 **/
9474 static int
9475 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9476 struct lpfc_iocbq *cmdiocb)
9477 {
9478 struct lpfc_vport *vport = cmdiocb->vport;
9479 struct lpfc_iocbq *abtsiocbp;
9480 IOCB_t *icmd = NULL;
9481 IOCB_t *iabt = NULL;
9482 int retval;
9483 unsigned long iflags;
9484
9485 /*
9486 * There are certain command types we don't want to abort. And we
9487 * don't want to abort commands that are already in the process of
9488 * being aborted.
9489 */
9490 icmd = &cmdiocb->iocb;
9491 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9492 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9493 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9494 return 0;
9495
9496 /* issue ABTS for this IOCB based on iotag */
9497 abtsiocbp = __lpfc_sli_get_iocbq(phba);
9498 if (abtsiocbp == NULL)
9499 return 0;
9500
9501 /* Mark the command as driver-aborted so the response path sets the
9502 * correct status before calling the completion handler
9503 */
9504 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9505
9506 iabt = &abtsiocbp->iocb;
9507 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9508 iabt->un.acxri.abortContextTag = icmd->ulpContext;
9509 if (phba->sli_rev == LPFC_SLI_REV4) {
9510 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9511 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9512 }
9513 else
9514 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9515 iabt->ulpLe = 1;
9516 iabt->ulpClass = icmd->ulpClass;
9517
9518 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9519 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9520 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9521 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9522
9523 if (phba->link_state >= LPFC_LINK_UP)
9524 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9525 else
9526 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9527
9528 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9529
9530 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9531 "0339 Abort xri x%x, original iotag x%x, "
9532 "abort cmd iotag x%x\n",
9533 iabt->un.acxri.abortIoTag,
9534 iabt->un.acxri.abortContextTag,
9535 abtsiocbp->iotag);
9536
9537 if (phba->sli_rev == LPFC_SLI_REV4) {
9538 /* Note: both hbalock and ring_lock need to be set here */
9539 spin_lock_irqsave(&pring->ring_lock, iflags);
9540 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9541 abtsiocbp, 0);
9542 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9543 } else {
9544 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9545 abtsiocbp, 0);
9546 }
9547
9548 if (retval)
9549 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9550
9551 /*
9552 * Caller to this routine should check for IOCB_ERROR
9553 * and handle it properly. This routine no longer removes
9554 * the iocb from the txcmplq or calls its completion handler on IOCB_ERROR.
9555 */
9556 return retval;
9557 }
9558
9559 /**
9560 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9561 * @phba: Pointer to HBA context object.
9562 * @pring: Pointer to driver SLI ring object.
9563 * @cmdiocb: Pointer to driver command iocb object.
9564 *
9565 * This function issues an abort iocb for the provided command iocb. In case
9566 * of unloading, the abort iocb will not be issued to commands on the ELS
9567 * ring. Instead, the callback function of those commands is changed so
9568 * that nothing happens when they finish. This function is called with
9569 * hbalock held. The function returns 0 when the command iocb is an abort
9570 * request.
9571 **/
9572 int
9573 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9574 struct lpfc_iocbq *cmdiocb)
9575 {
9576 struct lpfc_vport *vport = cmdiocb->vport;
9577 int retval = IOCB_ERROR;
9578 IOCB_t *icmd = NULL;
9579
9580 /*
9581 * There are certain command types we don't want to abort. And we
9582 * don't want to abort commands that are already in the process of
9583 * being aborted.
9584 */
9585 icmd = &cmdiocb->iocb;
9586 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9587 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9588 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9589 return 0;
9590
9591 /*
9592 * If we're unloading, don't abort iocb on the ELS ring, but change
9593 * the callback so that nothing happens when it finishes.
9594 */
9595 if ((vport->load_flag & FC_UNLOADING) &&
9596 (pring->ringno == LPFC_ELS_RING)) {
9597 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9598 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9599 else
9600 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9601 goto abort_iotag_exit;
9602 }
9603
9604 /* Now, we try to issue the abort to the cmdiocb out */
9605 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9606
9607 abort_iotag_exit:
9608 /*
9609 * Caller to this routine should check for IOCB_ERROR
9610 * and handle it properly. This routine no longer removes
9611 * the iocb from the txcmplq or calls its completion handler on IOCB_ERROR.
9612 */
9613 return retval;
9614 }
9615
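/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * aborting everything a vport still has outstanding on a ring's txcmplq,
 * the same pattern lpfc_sli_host_down() uses above.  The caller must hold
 * the hbalock.  The helper name is hypothetical.
 */
#if 0
static void lpfc_example_abort_vport_ios(struct lpfc_hba *phba,
					 struct lpfc_vport *vport,
					 struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->vport != vport)
			continue;
		/* Issues CMD_ABORT_XRI_CN or CMD_CLOSE_XRI_CN for this iocb. */
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
	}
}
#endif
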
9616 /**
9617 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9618 * @phba: Pointer to HBA context object.
9619 * @pring: Pointer to driver SLI ring object.
9620 *
9621 * This function aborts all iocbs in the given ring and frees all the iocb
9622 * objects in txq. This function issues abort iocbs unconditionally for all
9623 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
9624 * to complete before the return of this function. The caller is not required
9625 * to hold any locks.
9626 **/
9627 static void
9628 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9629 {
9630 LIST_HEAD(completions);
9631 struct lpfc_iocbq *iocb, *next_iocb;
9632
9633 if (pring->ringno == LPFC_ELS_RING)
9634 lpfc_fabric_abort_hba(phba);
9635
9636 spin_lock_irq(&phba->hbalock);
9637
9638 /* Take off all the iocbs on txq for cancelling */
9639 list_splice_init(&pring->txq, &completions);
9640 pring->txq_cnt = 0;
9641
9642 /* Next issue ABTS for everything on the txcmplq */
9643 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9644 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9645
9646 spin_unlock_irq(&phba->hbalock);
9647
9648 /* Cancel all the IOCBs from the completions list */
9649 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9650 IOERR_SLI_ABORTED);
9651 }
9652
9653 /**
9654 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9655 * @phba: pointer to lpfc HBA data structure.
9656 *
9657 * This routine will abort all pending and outstanding iocbs to an HBA.
9658 **/
9659 void
9660 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9661 {
9662 struct lpfc_sli *psli = &phba->sli;
9663 struct lpfc_sli_ring *pring;
9664 int i;
9665
9666 for (i = 0; i < psli->num_rings; i++) {
9667 pring = &psli->ring[i];
9668 lpfc_sli_iocb_ring_abort(phba, pring);
9669 }
9670 }
9671
9672 /**
9673 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
9674 * @iocbq: Pointer to driver iocb object.
9675 * @vport: Pointer to driver virtual port object.
9676 * @tgt_id: SCSI ID of the target.
9677 * @lun_id: LUN ID of the scsi device.
9678 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9679 *
9680 * This function acts as an iocb filter for functions which abort or count
9681 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9682 * 0 if the filtering criteria is met for the given iocb and will return
9683 * 1 if the filtering criteria is not met.
9684 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9685 * given iocb is for the SCSI device specified by vport, tgt_id and
9686 * lun_id parameter.
9687 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
9688 * given iocb is for the SCSI target specified by vport and tgt_id
9689 * parameters.
9690 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9691 * given iocb is for the SCSI host associated with the given vport.
9692 * This function is called with no locks held.
9693 **/
9694 static int
9695 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9696 uint16_t tgt_id, uint64_t lun_id,
9697 lpfc_ctx_cmd ctx_cmd)
9698 {
9699 struct lpfc_scsi_buf *lpfc_cmd;
9700 int rc = 1;
9701
9702 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9703 return rc;
9704
9705 if (iocbq->vport != vport)
9706 return rc;
9707
9708 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
9709
9710 if (lpfc_cmd->pCmd == NULL)
9711 return rc;
9712
9713 switch (ctx_cmd) {
9714 case LPFC_CTX_LUN:
9715 if ((lpfc_cmd->rdata->pnode) &&
9716 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9717 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
9718 rc = 0;
9719 break;
9720 case LPFC_CTX_TGT:
9721 if ((lpfc_cmd->rdata->pnode) &&
9722 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
9723 rc = 0;
9724 break;
9725 case LPFC_CTX_HOST:
9726 rc = 0;
9727 break;
9728 default:
9729 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
9730 __func__, ctx_cmd);
9731 break;
9732 }
9733
9734 return rc;
9735 }
9736
9737 /**
9738 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
9739 * @vport: Pointer to virtual port.
9740 * @tgt_id: SCSI ID of the target.
9741 * @lun_id: LUN ID of the scsi device.
9742 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9743 *
9744 * This function returns number of FCP commands pending for the vport.
9745 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
9746 * commands pending on the vport associated with SCSI device specified
9747 * by tgt_id and lun_id parameters.
9748 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
9749 * commands pending on the vport associated with SCSI target specified
9750 * by tgt_id parameter.
9751 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
9752 * commands pending on the vport.
9753 * This function returns the number of iocbs which satisfy the filter.
9754 * This function is called without any lock held.
9755 **/
9756 int
9757 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9758 lpfc_ctx_cmd ctx_cmd)
9759 {
9760 struct lpfc_hba *phba = vport->phba;
9761 struct lpfc_iocbq *iocbq;
9762 int sum, i;
9763
9764 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9765 iocbq = phba->sli.iocbq_lookup[i];
9766
9767 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9768 ctx_cmd) == 0)
9769 sum++;
9770 }
9771
9772 return sum;
9773 }
9774
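/*
 * Illustrative sketch (not part of the driver, kept out of the build): error
 * handlers typically use lpfc_sli_sum_iocb() to poll for outstanding FCP
 * commands, for example while waiting for a LUN to drain after a reset.  The
 * helper name, retry count and polling interval are hypothetical.
 */
#if 0
static int lpfc_example_wait_lun_drain(struct lpfc_vport *vport,
				       uint16_t tgt_id, uint64_t lun_id,
				       int max_tries)
{
	while (max_tries--) {
		if (!lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
			return 0;
		msleep(100);	/* give outstanding commands time to finish */
	}
	return -ETIMEDOUT;
}
#endif
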
9775 /**
9776 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
9777 * @phba: Pointer to HBA context object
9778 * @cmdiocb: Pointer to command iocb object.
9779 * @rspiocb: Pointer to response iocb object.
9780 *
9781 * This function is called when an aborted FCP iocb completes. This
9782 * function is called by the ring event handler with no lock held.
9783 * This function frees the iocb.
9784 **/
9785 void
9786 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9787 struct lpfc_iocbq *rspiocb)
9788 {
9789 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9790 "3096 ABORT_XRI_CN completing on xri x%x "
9791 "original iotag x%x, abort cmd iotag x%x "
9792 "status 0x%x, reason 0x%x\n",
9793 cmdiocb->iocb.un.acxri.abortContextTag,
9794 cmdiocb->iocb.un.acxri.abortIoTag,
9795 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
9796 rspiocb->iocb.un.ulpWord[4]);
9797 lpfc_sli_release_iocbq(phba, cmdiocb);
9798 return;
9799 }
9800
9801 /**
9802 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
9803 * @vport: Pointer to virtual port.
9804 * @pring: Pointer to driver SLI ring object.
9805 * @tgt_id: SCSI ID of the target.
9806 * @lun_id: LUN ID of the scsi device.
9807 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9808 *
9809 * This function sends an abort command for every SCSI command
9810 * associated with the given virtual port pending on the ring
9811 * filtered by lpfc_sli_validate_fcp_iocb function.
9812 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9813 * FCP iocbs associated with lun specified by tgt_id and lun_id
9814 * parameters
9815 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9816 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9817 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9818 * FCP iocbs associated with virtual port.
9819 * This function returns number of iocbs it failed to abort.
9820 * This function is called with no locks held.
9821 **/
9822 int
9823 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9824 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
9825 {
9826 struct lpfc_hba *phba = vport->phba;
9827 struct lpfc_iocbq *iocbq;
9828 struct lpfc_iocbq *abtsiocb;
9829 IOCB_t *cmd = NULL;
9830 int errcnt = 0, ret_val = 0;
9831 int i;
9832
9833 for (i = 1; i <= phba->sli.last_iotag; i++) {
9834 iocbq = phba->sli.iocbq_lookup[i];
9835
9836 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
9837 abort_cmd) != 0)
9838 continue;
9839
9840 /* issue ABTS for this IOCB based on iotag */
9841 abtsiocb = lpfc_sli_get_iocbq(phba);
9842 if (abtsiocb == NULL) {
9843 errcnt++;
9844 continue;
9845 }
9846
9847 cmd = &iocbq->iocb;
9848 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9849 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
9850 if (phba->sli_rev == LPFC_SLI_REV4)
9851 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9852 else
9853 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
9854 abtsiocb->iocb.ulpLe = 1;
9855 abtsiocb->iocb.ulpClass = cmd->ulpClass;
9856 abtsiocb->vport = phba->pport;
9857
9858 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9859 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
9860 if (iocbq->iocb_flag & LPFC_IO_FCP)
9861 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
9862
9863 if (lpfc_is_link_up(phba))
9864 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9865 else
9866 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9867
9868 /* Setup callback routine and issue the command. */
9869 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
9870 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9871 abtsiocb, 0);
9872 if (ret_val == IOCB_ERROR) {
9873 lpfc_sli_release_iocbq(phba, abtsiocb);
9874 errcnt++;
9875 continue;
9876 }
9877 }
9878
9879 return errcnt;
9880 }
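
/*
 * Illustrative call pattern (a sketch, not taken verbatim from the
 * driver): a SCSI target reset handler could request aborts for every
 * command queued to one target and then fall back to polling
 * lpfc_sli_sum_iocb() above until the aborted exchanges actually finish:
 *
 *	cnt = lpfc_sli_abort_iocb(vport, &phba->sli.ring[LPFC_FCP_RING],
 *				  tgt_id, 0, LPFC_CTX_TGT);
 *
 * A non-zero return means some aborts could not even be issued; a zero
 * return still does not guarantee that the exchanges have completed yet.
 */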
9881
9882 /**
9883 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
9884 * @phba: Pointer to HBA context object.
9885 * @cmdiocbq: Pointer to command iocb.
9886 * @rspiocbq: Pointer to response iocb.
9887 *
9888 * This function is the completion handler for iocbs issued using
9889 * lpfc_sli_issue_iocb_wait function. This function is called by the
9890 * ring event handler function without any lock held. This function
9891 * can be called from both worker thread context and interrupt
9892 * context. This function also can be called from other thread which
9893 * cleans up the SLI layer objects.
9894  * This function copies the contents of the response iocb to the
9895 * response iocb memory object provided by the caller of
9896 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9897 * sleeps for the iocb completion.
9898 **/
9899 static void
9900 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9901 struct lpfc_iocbq *cmdiocbq,
9902 struct lpfc_iocbq *rspiocbq)
9903 {
9904 wait_queue_head_t *pdone_q;
9905 unsigned long iflags;
9906 struct lpfc_scsi_buf *lpfc_cmd;
9907
9908 spin_lock_irqsave(&phba->hbalock, iflags);
9909 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9910 if (cmdiocbq->context2 && rspiocbq)
9911 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9912 &rspiocbq->iocb, sizeof(IOCB_t));
9913
9914 /* Set the exchange busy flag for task management commands */
9915 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9916 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9917 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9918 cur_iocbq);
9919 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9920 }
9921
9922 pdone_q = cmdiocbq->context_un.wait_queue;
9923 if (pdone_q)
9924 wake_up(pdone_q);
9925 spin_unlock_irqrestore(&phba->hbalock, iflags);
9926 return;
9927 }
9928
9929 /**
9930 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9931  * @phba: Pointer to HBA context object.
9932 * @piocbq: Pointer to command iocb.
9933 * @flag: Flag to test.
9934 *
9935  * This routine grabs the hbalock and then tests the iocb_flag to
9936 * see if the passed in flag is set.
9937 * Returns:
9938 * 1 if flag is set.
9939 * 0 if flag is not set.
9940 **/
9941 static int
9942 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9943 struct lpfc_iocbq *piocbq, uint32_t flag)
9944 {
9945 unsigned long iflags;
9946 int ret;
9947
9948 spin_lock_irqsave(&phba->hbalock, iflags);
9949 ret = piocbq->iocb_flag & flag;
9950 spin_unlock_irqrestore(&phba->hbalock, iflags);
9951 return ret;
9952
9953 }
9954
9955 /**
9956 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
9957  * @phba: Pointer to HBA context object.
9958 * @pring: Pointer to sli ring.
9959 * @piocb: Pointer to command iocb.
9960 * @prspiocbq: Pointer to response iocb.
9961 * @timeout: Timeout in number of seconds.
9962 *
9963 * This function issues the iocb to firmware and waits for the
9964 * iocb to complete. If the iocb command is not
9965 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9966 * Caller should not free the iocb resources if this function
9967 * returns IOCB_TIMEDOUT.
9968  * The function waits for the iocb completion using a
9969  * non-interruptible wait.
9970 * This function will sleep while waiting for iocb completion.
9971 * So, this function should not be called from any context which
9972 * does not allow sleeping. Due to the same reason, this function
9973 * cannot be called with interrupt disabled.
9974  * This function assumes that the iocb completions occur while
9975  * this function sleeps. So, this function cannot be called from
9976  * the thread which processes iocb completions for this ring.
9977 * This function clears the iocb_flag of the iocb object before
9978 * issuing the iocb and the iocb completion handler sets this
9979 * flag and wakes this thread when the iocb completes.
9980 * The contents of the response iocb will be copied to prspiocbq
9981 * by the completion handler when the command completes.
9982 * This function returns IOCB_SUCCESS when success.
9983 * This function is called with no lock held.
9984 **/
9985 int
9986 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
9987 uint32_t ring_number,
9988 struct lpfc_iocbq *piocb,
9989 struct lpfc_iocbq *prspiocbq,
9990 uint32_t timeout)
9991 {
9992 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
9993 long timeleft, timeout_req = 0;
9994 int retval = IOCB_SUCCESS;
9995 uint32_t creg_val;
9996 struct lpfc_iocbq *iocb;
9997 int txq_cnt = 0;
9998 int txcmplq_cnt = 0;
9999 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10000 /*
10001 * If the caller has provided a response iocbq buffer, then context2
10002 	 * must be NULL or it is an error.
10003 */
10004 if (prspiocbq) {
10005 if (piocb->context2)
10006 return IOCB_ERROR;
10007 piocb->context2 = prspiocbq;
10008 }
10009
10010 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
10011 piocb->context_un.wait_queue = &done_q;
10012 piocb->iocb_flag &= ~LPFC_IO_WAKE;
10013
10014 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10015 if (lpfc_readl(phba->HCregaddr, &creg_val))
10016 return IOCB_ERROR;
10017 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
10018 writel(creg_val, phba->HCregaddr);
10019 readl(phba->HCregaddr); /* flush */
10020 }
10021
10022 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10023 SLI_IOCB_RET_IOCB);
10024 if (retval == IOCB_SUCCESS) {
10025 timeout_req = msecs_to_jiffies(timeout * 1000);
10026 timeleft = wait_event_timeout(done_q,
10027 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10028 timeout_req);
10029
10030 if (piocb->iocb_flag & LPFC_IO_WAKE) {
10031 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10032 "0331 IOCB wake signaled\n");
10033 } else if (timeleft == 0) {
10034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10035 "0338 IOCB wait timeout error - no "
10036 "wake response Data x%x\n", timeout);
10037 retval = IOCB_TIMEDOUT;
10038 } else {
10039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10040 "0330 IOCB wake NOT set, "
10041 "Data x%x x%lx\n",
10042 timeout, (timeleft / jiffies));
10043 retval = IOCB_TIMEDOUT;
10044 }
10045 } else if (retval == IOCB_BUSY) {
10046 if (phba->cfg_log_verbose & LOG_SLI) {
10047 list_for_each_entry(iocb, &pring->txq, list) {
10048 txq_cnt++;
10049 }
10050 list_for_each_entry(iocb, &pring->txcmplq, list) {
10051 txcmplq_cnt++;
10052 }
10053 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10054 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10055 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10056 }
10057 return retval;
10058 } else {
10059 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10060 "0332 IOCB wait issue failed, Data x%x\n",
10061 retval);
10062 retval = IOCB_ERROR;
10063 }
10064
10065 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10066 if (lpfc_readl(phba->HCregaddr, &creg_val))
10067 return IOCB_ERROR;
10068 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10069 writel(creg_val, phba->HCregaddr);
10070 readl(phba->HCregaddr); /* flush */
10071 }
10072
10073 if (prspiocbq)
10074 piocb->context2 = NULL;
10075
10076 piocb->context_un.wait_queue = NULL;
10077 piocb->iocb_cmpl = NULL;
10078 return retval;
10079 }
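
/*
 * Sketch of the synchronous caller pattern (hypothetical caller; the real
 * users live in the ELS/CT and management paths): allocate a response
 * iocbq, let this routine sleep until the completion handler copies the
 * response in, and be careful not to free the command on IOCB_TIMEDOUT:
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		status = rspiocbq->iocb.ulpStatus;
 *	else if (rc == IOCB_TIMEDOUT)
 *		cmdiocbq = NULL;	// completion may still run later
 *
 * The 30-second timeout is only an example value.
 */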
10080
10081 /**
10082 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
10083 * @phba: Pointer to HBA context object.
10084 * @pmboxq: Pointer to driver mailbox object.
10085 * @timeout: Timeout in number of seconds.
10086 *
10087 * This function issues the mailbox to firmware and waits for the
10088 * mailbox command to complete. If the mailbox command is not
10089 * completed within timeout seconds, it returns MBX_TIMEOUT.
10090 * The function waits for the mailbox completion using an
10091 * interruptible wait. If the thread is woken up due to a
10092 * signal, MBX_TIMEOUT error is returned to the caller. Caller
10093 * should not free the mailbox resources, if this function returns
10094 * MBX_TIMEOUT.
10095 * This function will sleep while waiting for mailbox completion.
10096 * So, this function should not be called from any context which
10097 * does not allow sleeping. Due to the same reason, this function
10098 * cannot be called with interrupt disabled.
10099 * This function assumes that the mailbox completion occurs while
10100  * this function sleeps. So, this function cannot be called from
10101 * the worker thread which processes mailbox completion.
10102 * This function is called in the context of HBA management
10103 * applications.
10104 * This function returns MBX_SUCCESS when successful.
10105 * This function is called with no lock held.
10106 **/
10107 int
10108 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10109 uint32_t timeout)
10110 {
10111 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10112 int retval;
10113 unsigned long flag;
10114
10115 /* The caller must leave context1 empty. */
10116 if (pmboxq->context1)
10117 return MBX_NOT_FINISHED;
10118
10119 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10120 /* setup wake call as IOCB callback */
10121 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10122 /* setup context field to pass wait_queue pointer to wake function */
10123 pmboxq->context1 = &done_q;
10124
10125 /* now issue the command */
10126 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10127 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10128 wait_event_interruptible_timeout(done_q,
10129 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10130 msecs_to_jiffies(timeout * 1000));
10131
10132 spin_lock_irqsave(&phba->hbalock, flag);
10133 pmboxq->context1 = NULL;
10134 /*
10135 	 * if the LPFC_MBX_WAKE flag is set, the mailbox completed; otherwise
10136 	 * do not free the resources, the default handler will release them.
10137 */
10138 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10139 retval = MBX_SUCCESS;
10140 lpfc_sli4_swap_str(phba, pmboxq);
10141 } else {
10142 retval = MBX_TIMEOUT;
10143 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10144 }
10145 spin_unlock_irqrestore(&phba->hbalock, flag);
10146 }
10147
10148 return retval;
10149 }
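
/*
 * Example of the synchronous mailbox pattern (a sketch, assuming the
 * caller owns a mailbox allocated from the driver's mempool):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (pmb) {
 *		lpfc_read_rev(phba, pmb);
 *		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *		if (rc != MBX_TIMEOUT)
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *		// on MBX_TIMEOUT the default completion handler installed
 *		// above releases the mailbox when it finally completes
 *	}
 */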
10150
10151 /**
10152 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
10153 * @phba: Pointer to HBA context.
10154  * @mbx_action: LPFC_MBX_NO_WAIT flushes immediately, otherwise wait first.
10155 * This function is called to shutdown the driver's mailbox sub-system.
10156  * It first marks the mailbox sub-system as blocked to prevent asynchronous
10157  * mailbox commands from being issued off the pending mailbox
10158 * command queue. If the mailbox command sub-system shutdown is due to
10159 * HBA error conditions such as EEH or ERATT, this routine shall invoke
10160 * the mailbox sub-system flush routine to forcefully bring down the
10161 * mailbox sub-system. Otherwise, if it is due to normal condition (such
10162 * as with offline or HBA function reset), this routine will wait for the
10163 * outstanding mailbox command to complete before invoking the mailbox
10164 * sub-system flush routine to gracefully bring down mailbox sub-system.
10165 **/
10166 void
10167 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10168 {
10169 struct lpfc_sli *psli = &phba->sli;
10170 unsigned long timeout;
10171
10172 if (mbx_action == LPFC_MBX_NO_WAIT) {
10173 /* delay 100ms for port state */
10174 msleep(100);
10175 lpfc_sli_mbox_sys_flush(phba);
10176 return;
10177 }
10178 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10179
10180 spin_lock_irq(&phba->hbalock);
10181 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10182
10183 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10184 /* Determine how long we might wait for the active mailbox
10185 * command to be gracefully completed by firmware.
10186 */
10187 if (phba->sli.mbox_active)
10188 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10189 phba->sli.mbox_active) *
10190 1000) + jiffies;
10191 spin_unlock_irq(&phba->hbalock);
10192
10193 while (phba->sli.mbox_active) {
10194 /* Check active mailbox complete status every 2ms */
10195 msleep(2);
10196 if (time_after(jiffies, timeout))
10197 			/* Timeout: let the mailbox flush routine
10198 			 * forcefully release the active mailbox command
10199 */
10200 break;
10201 }
10202 } else
10203 spin_unlock_irq(&phba->hbalock);
10204
10205 lpfc_sli_mbox_sys_flush(phba);
10206 }
10207
10208 /**
10209 * lpfc_sli_eratt_read - read sli-3 error attention events
10210 * @phba: Pointer to HBA context.
10211 *
10212 * This function is called to read the SLI3 device error attention registers
10213  * for possible error attention events. The caller must hold the hbalock
10214 * with spin_lock_irq().
10215 *
10216 * This function returns 1 when there is Error Attention in the Host Attention
10217 * Register and returns 0 otherwise.
10218 **/
10219 static int
10220 lpfc_sli_eratt_read(struct lpfc_hba *phba)
10221 {
10222 uint32_t ha_copy;
10223
10224 /* Read chip Host Attention (HA) register */
10225 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10226 goto unplug_err;
10227
10228 if (ha_copy & HA_ERATT) {
10229 /* Read host status register to retrieve error event */
10230 if (lpfc_sli_read_hs(phba))
10231 goto unplug_err;
10232
10233 		/* Check if a deferred error condition is active */
10234 if ((HS_FFER1 & phba->work_hs) &&
10235 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10236 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10237 phba->hba_flag |= DEFER_ERATT;
10238 /* Clear all interrupt enable conditions */
10239 writel(0, phba->HCregaddr);
10240 readl(phba->HCregaddr);
10241 }
10242
10243 /* Set the driver HA work bitmap */
10244 phba->work_ha |= HA_ERATT;
10245 /* Indicate polling handles this ERATT */
10246 phba->hba_flag |= HBA_ERATT_HANDLED;
10247 return 1;
10248 }
10249 return 0;
10250
10251 unplug_err:
10252 /* Set the driver HS work bitmap */
10253 phba->work_hs |= UNPLUG_ERR;
10254 /* Set the driver HA work bitmap */
10255 phba->work_ha |= HA_ERATT;
10256 /* Indicate polling handles this ERATT */
10257 phba->hba_flag |= HBA_ERATT_HANDLED;
10258 return 1;
10259 }
10260
10261 /**
10262 * lpfc_sli4_eratt_read - read sli-4 error attention events
10263 * @phba: Pointer to HBA context.
10264 *
10265 * This function is called to read the SLI4 device error attention registers
10266  * for possible error attention events. The caller must hold the hbalock
10267 * with spin_lock_irq().
10268 *
10269 * This function returns 1 when there is Error Attention in the Host Attention
10270 * Register and returns 0 otherwise.
10271 **/
10272 static int
10273 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10274 {
10275 uint32_t uerr_sta_hi, uerr_sta_lo;
10276 uint32_t if_type, portsmphr;
10277 struct lpfc_register portstat_reg;
10278
10279 /*
10280 * For now, use the SLI4 device internal unrecoverable error
10281 * registers for error attention. This can be changed later.
10282 */
10283 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10284 switch (if_type) {
10285 case LPFC_SLI_INTF_IF_TYPE_0:
10286 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10287 &uerr_sta_lo) ||
10288 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10289 &uerr_sta_hi)) {
10290 phba->work_hs |= UNPLUG_ERR;
10291 phba->work_ha |= HA_ERATT;
10292 phba->hba_flag |= HBA_ERATT_HANDLED;
10293 return 1;
10294 }
10295 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10296 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10298 "1423 HBA Unrecoverable error: "
10299 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10300 "ue_mask_lo_reg=0x%x, "
10301 "ue_mask_hi_reg=0x%x\n",
10302 uerr_sta_lo, uerr_sta_hi,
10303 phba->sli4_hba.ue_mask_lo,
10304 phba->sli4_hba.ue_mask_hi);
10305 phba->work_status[0] = uerr_sta_lo;
10306 phba->work_status[1] = uerr_sta_hi;
10307 phba->work_ha |= HA_ERATT;
10308 phba->hba_flag |= HBA_ERATT_HANDLED;
10309 return 1;
10310 }
10311 break;
10312 case LPFC_SLI_INTF_IF_TYPE_2:
10313 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10314 &portstat_reg.word0) ||
10315 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10316 &portsmphr)){
10317 phba->work_hs |= UNPLUG_ERR;
10318 phba->work_ha |= HA_ERATT;
10319 phba->hba_flag |= HBA_ERATT_HANDLED;
10320 return 1;
10321 }
10322 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10323 phba->work_status[0] =
10324 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10325 phba->work_status[1] =
10326 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10327 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10328 "2885 Port Status Event: "
10329 "port status reg 0x%x, "
10330 "port smphr reg 0x%x, "
10331 "error 1=0x%x, error 2=0x%x\n",
10332 portstat_reg.word0,
10333 portsmphr,
10334 phba->work_status[0],
10335 phba->work_status[1]);
10336 phba->work_ha |= HA_ERATT;
10337 phba->hba_flag |= HBA_ERATT_HANDLED;
10338 return 1;
10339 }
10340 break;
10341 case LPFC_SLI_INTF_IF_TYPE_1:
10342 default:
10343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10344 "2886 HBA Error Attention on unsupported "
10345 "if type %d.", if_type);
10346 return 1;
10347 }
10348
10349 return 0;
10350 }
10351
10352 /**
10353 * lpfc_sli_check_eratt - check error attention events
10354 * @phba: Pointer to HBA context.
10355 *
10356 * This function is called from timer soft interrupt context to check HBA's
10357 * error attention register bit for error attention events.
10358 *
10359 * This function returns 1 when there is Error Attention in the Host Attention
10360 * Register and returns 0 otherwise.
10361 **/
10362 int
10363 lpfc_sli_check_eratt(struct lpfc_hba *phba)
10364 {
10365 uint32_t ha_copy;
10366
10367 /* If somebody is waiting to handle an eratt, don't process it
10368 * here. The brdkill function will do this.
10369 */
10370 if (phba->link_flag & LS_IGNORE_ERATT)
10371 return 0;
10372
10373 /* Check if interrupt handler handles this ERATT */
10374 spin_lock_irq(&phba->hbalock);
10375 if (phba->hba_flag & HBA_ERATT_HANDLED) {
10376 /* Interrupt handler has handled ERATT */
10377 spin_unlock_irq(&phba->hbalock);
10378 return 0;
10379 }
10380
10381 /*
10382 * If there is deferred error attention, do not check for error
10383 * attention
10384 */
10385 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10386 spin_unlock_irq(&phba->hbalock);
10387 return 0;
10388 }
10389
10390 /* If PCI channel is offline, don't process it */
10391 if (unlikely(pci_channel_offline(phba->pcidev))) {
10392 spin_unlock_irq(&phba->hbalock);
10393 return 0;
10394 }
10395
10396 switch (phba->sli_rev) {
10397 case LPFC_SLI_REV2:
10398 case LPFC_SLI_REV3:
10399 /* Read chip Host Attention (HA) register */
10400 ha_copy = lpfc_sli_eratt_read(phba);
10401 break;
10402 case LPFC_SLI_REV4:
10403 		/* Read device Unrecoverable Error (UERR) registers */
10404 ha_copy = lpfc_sli4_eratt_read(phba);
10405 break;
10406 default:
10407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10408 "0299 Invalid SLI revision (%d)\n",
10409 phba->sli_rev);
10410 ha_copy = 0;
10411 break;
10412 }
10413 spin_unlock_irq(&phba->hbalock);
10414
10415 return ha_copy;
10416 }
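
/*
 * Typical use (a sketch of the error-attention poll path): a timer
 * callback checks for a latched error attention and, when one is found,
 * simply wakes the worker thread to perform the actual recovery:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */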
10417
10418 /**
10419 * lpfc_intr_state_check - Check device state for interrupt handling
10420 * @phba: Pointer to HBA context.
10421 *
10422  * This inline routine checks whether a device or its PCI slot is in a state
10423  * in which the interrupt should be handled.
10424  *
10425  * This function returns 0 if the device or the PCI slot is in a state in which
10426  * the interrupt should be handled, otherwise -EIO.
10427 */
10428 static inline int
10429 lpfc_intr_state_check(struct lpfc_hba *phba)
10430 {
10431 /* If the pci channel is offline, ignore all the interrupts */
10432 if (unlikely(pci_channel_offline(phba->pcidev)))
10433 return -EIO;
10434
10435 /* Update device level interrupt statistics */
10436 phba->sli.slistat.sli_intr++;
10437
10438 /* Ignore all interrupts during initialization. */
10439 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10440 return -EIO;
10441
10442 return 0;
10443 }
10444
10445 /**
10446 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
10447 * @irq: Interrupt number.
10448 * @dev_id: The device context pointer.
10449 *
10450 * This function is directly called from the PCI layer as an interrupt
10451 * service routine when device with SLI-3 interface spec is enabled with
10452 * MSI-X multi-message interrupt mode and there are slow-path events in
10453 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
10454 * interrupt mode, this function is called as part of the device-level
10455 * interrupt handler. When the PCI slot is in error recovery or the HBA
10456 * is undergoing initialization, the interrupt handler will not process
10457 * the interrupt. The link attention and ELS ring attention events are
10458 * handled by the worker thread. The interrupt handler signals the worker
10459 * thread and returns for these events. This function is called without
10460 * any lock held. It gets the hbalock to access and update SLI data
10461 * structures.
10462 *
10463 * This function returns IRQ_HANDLED when interrupt is handled else it
10464 * returns IRQ_NONE.
10465 **/
10466 irqreturn_t
10467 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
10468 {
10469 struct lpfc_hba *phba;
10470 uint32_t ha_copy, hc_copy;
10471 uint32_t work_ha_copy;
10472 unsigned long status;
10473 unsigned long iflag;
10474 uint32_t control;
10475
10476 MAILBOX_t *mbox, *pmbox;
10477 struct lpfc_vport *vport;
10478 struct lpfc_nodelist *ndlp;
10479 struct lpfc_dmabuf *mp;
10480 LPFC_MBOXQ_t *pmb;
10481 int rc;
10482
10483 /*
10484 * Get the driver's phba structure from the dev_id and
10485 * assume the HBA is not interrupting.
10486 */
10487 phba = (struct lpfc_hba *)dev_id;
10488
10489 if (unlikely(!phba))
10490 return IRQ_NONE;
10491
10492 /*
10493 	 * Extra handling is needed when this function is invoked as an
10494 * individual interrupt handler in MSI-X multi-message interrupt mode
10495 */
10496 if (phba->intr_type == MSIX) {
10497 /* Check device state for handling interrupt */
10498 if (lpfc_intr_state_check(phba))
10499 return IRQ_NONE;
10500 /* Need to read HA REG for slow-path events */
10501 spin_lock_irqsave(&phba->hbalock, iflag);
10502 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10503 goto unplug_error;
10504 /* If somebody is waiting to handle an eratt don't process it
10505 * here. The brdkill function will do this.
10506 */
10507 if (phba->link_flag & LS_IGNORE_ERATT)
10508 ha_copy &= ~HA_ERATT;
10509 /* Check the need for handling ERATT in interrupt handler */
10510 if (ha_copy & HA_ERATT) {
10511 if (phba->hba_flag & HBA_ERATT_HANDLED)
10512 /* ERATT polling has handled ERATT */
10513 ha_copy &= ~HA_ERATT;
10514 else
10515 /* Indicate interrupt handler handles ERATT */
10516 phba->hba_flag |= HBA_ERATT_HANDLED;
10517 }
10518
10519 /*
10520 * If there is deferred error attention, do not check for any
10521 * interrupt.
10522 */
10523 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10524 spin_unlock_irqrestore(&phba->hbalock, iflag);
10525 return IRQ_NONE;
10526 }
10527
10528 /* Clear up only attention source related to slow-path */
10529 if (lpfc_readl(phba->HCregaddr, &hc_copy))
10530 goto unplug_error;
10531
10532 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
10533 HC_LAINT_ENA | HC_ERINT_ENA),
10534 phba->HCregaddr);
10535 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
10536 phba->HAregaddr);
10537 writel(hc_copy, phba->HCregaddr);
10538 readl(phba->HAregaddr); /* flush */
10539 spin_unlock_irqrestore(&phba->hbalock, iflag);
10540 } else
10541 ha_copy = phba->ha_copy;
10542
10543 work_ha_copy = ha_copy & phba->work_ha_mask;
10544
10545 if (work_ha_copy) {
10546 if (work_ha_copy & HA_LATT) {
10547 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
10548 /*
10549 * Turn off Link Attention interrupts
10550 * until CLEAR_LA done
10551 */
10552 spin_lock_irqsave(&phba->hbalock, iflag);
10553 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
10554 if (lpfc_readl(phba->HCregaddr, &control))
10555 goto unplug_error;
10556 control &= ~HC_LAINT_ENA;
10557 writel(control, phba->HCregaddr);
10558 readl(phba->HCregaddr); /* flush */
10559 spin_unlock_irqrestore(&phba->hbalock, iflag);
10560 }
10561 else
10562 work_ha_copy &= ~HA_LATT;
10563 }
10564
10565 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
10566 /*
10567 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10568 * the only slow ring.
10569 */
10570 status = (work_ha_copy &
10571 (HA_RXMASK << (4*LPFC_ELS_RING)));
10572 status >>= (4*LPFC_ELS_RING);
10573 if (status & HA_RXMASK) {
10574 spin_lock_irqsave(&phba->hbalock, iflag);
10575 if (lpfc_readl(phba->HCregaddr, &control))
10576 goto unplug_error;
10577
10578 lpfc_debugfs_slow_ring_trc(phba,
10579 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
10580 control, status,
10581 (uint32_t)phba->sli.slistat.sli_intr);
10582
10583 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
10584 lpfc_debugfs_slow_ring_trc(phba,
10585 "ISR Disable ring:"
10586 "pwork:x%x hawork:x%x wait:x%x",
10587 phba->work_ha, work_ha_copy,
10588 (uint32_t)((unsigned long)
10589 &phba->work_waitq));
10590
10591 control &=
10592 ~(HC_R0INT_ENA << LPFC_ELS_RING);
10593 writel(control, phba->HCregaddr);
10594 readl(phba->HCregaddr); /* flush */
10595 }
10596 else {
10597 lpfc_debugfs_slow_ring_trc(phba,
10598 "ISR slow ring: pwork:"
10599 "x%x hawork:x%x wait:x%x",
10600 phba->work_ha, work_ha_copy,
10601 (uint32_t)((unsigned long)
10602 &phba->work_waitq));
10603 }
10604 spin_unlock_irqrestore(&phba->hbalock, iflag);
10605 }
10606 }
10607 spin_lock_irqsave(&phba->hbalock, iflag);
10608 if (work_ha_copy & HA_ERATT) {
10609 if (lpfc_sli_read_hs(phba))
10610 goto unplug_error;
10611 /*
10612 			 * Check if a deferred error condition
10613 			 * is active
10614 */
10615 if ((HS_FFER1 & phba->work_hs) &&
10616 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10617 HS_FFER6 | HS_FFER7 | HS_FFER8) &
10618 phba->work_hs)) {
10619 phba->hba_flag |= DEFER_ERATT;
10620 /* Clear all interrupt enable conditions */
10621 writel(0, phba->HCregaddr);
10622 readl(phba->HCregaddr);
10623 }
10624 }
10625
10626 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
10627 pmb = phba->sli.mbox_active;
10628 pmbox = &pmb->u.mb;
10629 mbox = phba->mbox;
10630 vport = pmb->vport;
10631
10632 /* First check out the status word */
10633 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10634 if (pmbox->mbxOwner != OWN_HOST) {
10635 spin_unlock_irqrestore(&phba->hbalock, iflag);
10636 /*
10637 * Stray Mailbox Interrupt, mbxCommand <cmd>
10638 * mbxStatus <status>
10639 */
10640 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10641 LOG_SLI,
10642 "(%d):0304 Stray Mailbox "
10643 "Interrupt mbxCommand x%x "
10644 "mbxStatus x%x\n",
10645 (vport ? vport->vpi : 0),
10646 pmbox->mbxCommand,
10647 pmbox->mbxStatus);
10648 /* clear mailbox attention bit */
10649 work_ha_copy &= ~HA_MBATT;
10650 } else {
10651 phba->sli.mbox_active = NULL;
10652 spin_unlock_irqrestore(&phba->hbalock, iflag);
10653 phba->last_completion_time = jiffies;
10654 del_timer(&phba->sli.mbox_tmo);
10655 if (pmb->mbox_cmpl) {
10656 lpfc_sli_pcimem_bcopy(mbox, pmbox,
10657 MAILBOX_CMD_SIZE);
10658 if (pmb->out_ext_byte_len &&
10659 pmb->context2)
10660 lpfc_sli_pcimem_bcopy(
10661 phba->mbox_ext,
10662 pmb->context2,
10663 pmb->out_ext_byte_len);
10664 }
10665 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10666 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10667
10668 lpfc_debugfs_disc_trc(vport,
10669 LPFC_DISC_TRC_MBOX_VPORT,
10670 "MBOX dflt rpi: : "
10671 "status:x%x rpi:x%x",
10672 (uint32_t)pmbox->mbxStatus,
10673 pmbox->un.varWords[0], 0);
10674
10675 if (!pmbox->mbxStatus) {
10676 mp = (struct lpfc_dmabuf *)
10677 (pmb->context1);
10678 ndlp = (struct lpfc_nodelist *)
10679 pmb->context2;
10680
10681 /* Reg_LOGIN of dflt RPI was
10682 					 * successful. Now let's get
10683 * rid of the RPI using the
10684 * same mbox buffer.
10685 */
10686 lpfc_unreg_login(phba,
10687 vport->vpi,
10688 pmbox->un.varWords[0],
10689 pmb);
10690 pmb->mbox_cmpl =
10691 lpfc_mbx_cmpl_dflt_rpi;
10692 pmb->context1 = mp;
10693 pmb->context2 = ndlp;
10694 pmb->vport = vport;
10695 rc = lpfc_sli_issue_mbox(phba,
10696 pmb,
10697 MBX_NOWAIT);
10698 if (rc != MBX_BUSY)
10699 lpfc_printf_log(phba,
10700 KERN_ERR,
10701 LOG_MBOX | LOG_SLI,
10702 "0350 rc should have"
10703 "been MBX_BUSY\n");
10704 if (rc != MBX_NOT_FINISHED)
10705 goto send_current_mbox;
10706 }
10707 }
10708 spin_lock_irqsave(
10709 &phba->pport->work_port_lock,
10710 iflag);
10711 phba->pport->work_port_events &=
10712 ~WORKER_MBOX_TMO;
10713 spin_unlock_irqrestore(
10714 &phba->pport->work_port_lock,
10715 iflag);
10716 lpfc_mbox_cmpl_put(phba, pmb);
10717 }
10718 } else
10719 spin_unlock_irqrestore(&phba->hbalock, iflag);
10720
10721 if ((work_ha_copy & HA_MBATT) &&
10722 (phba->sli.mbox_active == NULL)) {
10723 send_current_mbox:
10724 /* Process next mailbox command if there is one */
10725 do {
10726 rc = lpfc_sli_issue_mbox(phba, NULL,
10727 MBX_NOWAIT);
10728 } while (rc == MBX_NOT_FINISHED);
10729 if (rc != MBX_SUCCESS)
10730 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10731 LOG_SLI, "0349 rc should be "
10732 "MBX_SUCCESS\n");
10733 }
10734
10735 spin_lock_irqsave(&phba->hbalock, iflag);
10736 phba->work_ha |= work_ha_copy;
10737 spin_unlock_irqrestore(&phba->hbalock, iflag);
10738 lpfc_worker_wake_up(phba);
10739 }
10740 return IRQ_HANDLED;
10741 unplug_error:
10742 spin_unlock_irqrestore(&phba->hbalock, iflag);
10743 return IRQ_HANDLED;
10744
10745 } /* lpfc_sli_sp_intr_handler */
10746
10747 /**
10748 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
10749 * @irq: Interrupt number.
10750 * @dev_id: The device context pointer.
10751 *
10752 * This function is directly called from the PCI layer as an interrupt
10753 * service routine when device with SLI-3 interface spec is enabled with
10754 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10755 * ring event in the HBA. However, when the device is enabled with either
10756 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10757 * device-level interrupt handler. When the PCI slot is in error recovery
10758 * or the HBA is undergoing initialization, the interrupt handler will not
10759  * process the interrupt. The SCSI FCP fast-path ring events are handled in
10760  * the interrupt context. This function is called without any lock held.
10761 * It gets the hbalock to access and update SLI data structures.
10762 *
10763 * This function returns IRQ_HANDLED when interrupt is handled else it
10764 * returns IRQ_NONE.
10765 **/
10766 irqreturn_t
10767 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
10768 {
10769 struct lpfc_hba *phba;
10770 uint32_t ha_copy;
10771 unsigned long status;
10772 unsigned long iflag;
10773
10774 /* Get the driver's phba structure from the dev_id and
10775 * assume the HBA is not interrupting.
10776 */
10777 phba = (struct lpfc_hba *) dev_id;
10778
10779 if (unlikely(!phba))
10780 return IRQ_NONE;
10781
10782 /*
10783 	 * Extra handling is needed when this function is invoked as an
10784 * individual interrupt handler in MSI-X multi-message interrupt mode
10785 */
10786 if (phba->intr_type == MSIX) {
10787 /* Check device state for handling interrupt */
10788 if (lpfc_intr_state_check(phba))
10789 return IRQ_NONE;
10790 /* Need to read HA REG for FCP ring and other ring events */
10791 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10792 return IRQ_HANDLED;
10793 /* Clear up only attention source related to fast-path */
10794 spin_lock_irqsave(&phba->hbalock, iflag);
10795 /*
10796 * If there is deferred error attention, do not check for
10797 * any interrupt.
10798 */
10799 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10800 spin_unlock_irqrestore(&phba->hbalock, iflag);
10801 return IRQ_NONE;
10802 }
10803 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10804 phba->HAregaddr);
10805 readl(phba->HAregaddr); /* flush */
10806 spin_unlock_irqrestore(&phba->hbalock, iflag);
10807 } else
10808 ha_copy = phba->ha_copy;
10809
10810 /*
10811 * Process all events on FCP ring. Take the optimized path for FCP IO.
10812 */
10813 ha_copy &= ~(phba->work_ha_mask);
10814
10815 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10816 status >>= (4*LPFC_FCP_RING);
10817 if (status & HA_RXMASK)
10818 lpfc_sli_handle_fast_ring_event(phba,
10819 &phba->sli.ring[LPFC_FCP_RING],
10820 status);
10821
10822 if (phba->cfg_multi_ring_support == 2) {
10823 /*
10824 * Process all events on extra ring. Take the optimized path
10825 * for extra ring IO.
10826 */
10827 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10828 status >>= (4*LPFC_EXTRA_RING);
10829 if (status & HA_RXMASK) {
10830 lpfc_sli_handle_fast_ring_event(phba,
10831 &phba->sli.ring[LPFC_EXTRA_RING],
10832 status);
10833 }
10834 }
10835 return IRQ_HANDLED;
10836 } /* lpfc_sli_fp_intr_handler */
10837
10838 /**
10839 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
10840 * @irq: Interrupt number.
10841 * @dev_id: The device context pointer.
10842 *
10843 * This function is the HBA device-level interrupt handler to device with
10844 * SLI-3 interface spec, called from the PCI layer when either MSI or
10845 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10846 * requires driver attention. This function invokes the slow-path interrupt
10847 * attention handling function and fast-path interrupt attention handling
10848 * function in turn to process the relevant HBA attention events. This
10849 * function is called without any lock held. It gets the hbalock to access
10850 * and update SLI data structures.
10851 *
10852 * This function returns IRQ_HANDLED when interrupt is handled, else it
10853 * returns IRQ_NONE.
10854 **/
10855 irqreturn_t
10856 lpfc_sli_intr_handler(int irq, void *dev_id)
10857 {
10858 struct lpfc_hba *phba;
10859 irqreturn_t sp_irq_rc, fp_irq_rc;
10860 unsigned long status1, status2;
10861 uint32_t hc_copy;
10862
10863 /*
10864 * Get the driver's phba structure from the dev_id and
10865 * assume the HBA is not interrupting.
10866 */
10867 phba = (struct lpfc_hba *) dev_id;
10868
10869 if (unlikely(!phba))
10870 return IRQ_NONE;
10871
10872 /* Check device state for handling interrupt */
10873 if (lpfc_intr_state_check(phba))
10874 return IRQ_NONE;
10875
10876 spin_lock(&phba->hbalock);
10877 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10878 spin_unlock(&phba->hbalock);
10879 return IRQ_HANDLED;
10880 }
10881
10882 if (unlikely(!phba->ha_copy)) {
10883 spin_unlock(&phba->hbalock);
10884 return IRQ_NONE;
10885 } else if (phba->ha_copy & HA_ERATT) {
10886 if (phba->hba_flag & HBA_ERATT_HANDLED)
10887 /* ERATT polling has handled ERATT */
10888 phba->ha_copy &= ~HA_ERATT;
10889 else
10890 /* Indicate interrupt handler handles ERATT */
10891 phba->hba_flag |= HBA_ERATT_HANDLED;
10892 }
10893
10894 /*
10895 * If there is deferred error attention, do not check for any interrupt.
10896 */
10897 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
10898 spin_unlock(&phba->hbalock);
10899 return IRQ_NONE;
10900 }
10901
10902 /* Clear attention sources except link and error attentions */
10903 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10904 spin_unlock(&phba->hbalock);
10905 return IRQ_HANDLED;
10906 }
10907 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10908 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10909 phba->HCregaddr);
10910 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
10911 writel(hc_copy, phba->HCregaddr);
10912 readl(phba->HAregaddr); /* flush */
10913 spin_unlock(&phba->hbalock);
10914
10915 /*
10916 * Invokes slow-path host attention interrupt handling as appropriate.
10917 */
10918
10919 /* status of events with mailbox and link attention */
10920 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10921
10922 /* status of events with ELS ring */
10923 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
10924 status2 >>= (4*LPFC_ELS_RING);
10925
10926 if (status1 || (status2 & HA_RXMASK))
10927 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
10928 else
10929 sp_irq_rc = IRQ_NONE;
10930
10931 /*
10932 * Invoke fast-path host attention interrupt handling as appropriate.
10933 */
10934
10935 /* status of events with FCP ring */
10936 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10937 status1 >>= (4*LPFC_FCP_RING);
10938
10939 /* status of events with extra ring */
10940 if (phba->cfg_multi_ring_support == 2) {
10941 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10942 status2 >>= (4*LPFC_EXTRA_RING);
10943 } else
10944 status2 = 0;
10945
10946 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
10947 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
10948 else
10949 fp_irq_rc = IRQ_NONE;
10950
10951 /* Return device-level interrupt handling status */
10952 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
10953 } /* lpfc_sli_intr_handler */
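
/*
 * Registration sketch (simplified; the real wiring lives in the driver's
 * init code, not here): with INTx or single-message MSI the device-level
 * handler above is the one registered, and it fans work out to the
 * slow-path and fast-path handlers:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * With MSI-X, lpfc_sli_sp_intr_handler and lpfc_sli_fp_intr_handler are
 * instead attached to their own vectors.
 */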
10954
10955 /**
10956 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10957 * @phba: pointer to lpfc hba data structure.
10958 *
10959 * This routine is invoked by the worker thread to process all the pending
10960 * SLI4 FCP abort XRI events.
10961 **/
10962 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10963 {
10964 struct lpfc_cq_event *cq_event;
10965
10966 /* First, declare the fcp xri abort event has been handled */
10967 spin_lock_irq(&phba->hbalock);
10968 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10969 spin_unlock_irq(&phba->hbalock);
10970 /* Now, handle all the fcp xri abort events */
10971 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10972 /* Get the first event from the head of the event queue */
10973 spin_lock_irq(&phba->hbalock);
10974 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10975 cq_event, struct lpfc_cq_event, list);
10976 spin_unlock_irq(&phba->hbalock);
10977 /* Notify aborted XRI for FCP work queue */
10978 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10979 /* Free the event processed back to the free pool */
10980 lpfc_sli4_cq_event_release(phba, cq_event);
10981 }
10982 }
10983
10984 /**
10985 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10986 * @phba: pointer to lpfc hba data structure.
10987 *
10988 * This routine is invoked by the worker thread to process all the pending
10989 * SLI4 els abort xri events.
10990 **/
10991 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10992 {
10993 struct lpfc_cq_event *cq_event;
10994
10995 /* First, declare the els xri abort event has been handled */
10996 spin_lock_irq(&phba->hbalock);
10997 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10998 spin_unlock_irq(&phba->hbalock);
10999 /* Now, handle all the els xri abort events */
11000 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
11001 /* Get the first event from the head of the event queue */
11002 spin_lock_irq(&phba->hbalock);
11003 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11004 cq_event, struct lpfc_cq_event, list);
11005 spin_unlock_irq(&phba->hbalock);
11006 /* Notify aborted XRI for ELS work queue */
11007 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11008 /* Free the event processed back to the free pool */
11009 lpfc_sli4_cq_event_release(phba, cq_event);
11010 }
11011 }
11012
11013 /**
11014 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
11015 * @phba: pointer to lpfc hba data structure
11016 * @pIocbIn: pointer to the rspiocbq
11017 * @pIocbOut: pointer to the cmdiocbq
11018 * @wcqe: pointer to the complete wcqe
11019 *
11020 * This routine transfers the fields of a command iocbq to a response iocbq
11021 * by copying all the IOCB fields from command iocbq and transferring the
11022 * completion status information from the complete wcqe.
11023 **/
11024 static void
11025 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
11026 struct lpfc_iocbq *pIocbIn,
11027 struct lpfc_iocbq *pIocbOut,
11028 struct lpfc_wcqe_complete *wcqe)
11029 {
11030 unsigned long iflags;
11031 uint32_t status;
11032 size_t offset = offsetof(struct lpfc_iocbq, iocb);
11033
11034 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
11035 sizeof(struct lpfc_iocbq) - offset);
11036 /* Map WCQE parameters into irspiocb parameters */
11037 status = bf_get(lpfc_wcqe_c_status, wcqe);
11038 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11039 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
11040 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11041 pIocbIn->iocb.un.fcpi.fcpi_parm =
11042 pIocbOut->iocb.un.fcpi.fcpi_parm -
11043 wcqe->total_data_placed;
11044 else
11045 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11046 else {
11047 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11048 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
11049 }
11050
11051 /* Convert BG errors for completion status */
11052 if (status == CQE_STATUS_DI_ERROR) {
11053 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11054
11055 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11056 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11057 else
11058 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11059
11060 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11061 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
11062 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11063 BGS_GUARD_ERR_MASK;
11064 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
11065 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11066 BGS_APPTAG_ERR_MASK;
11067 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
11068 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11069 BGS_REFTAG_ERR_MASK;
11070
11071 /* Check to see if there was any good data before the error */
11072 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11073 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11074 BGS_HI_WATER_MARK_PRESENT_MASK;
11075 pIocbIn->iocb.unsli3.sli3_bg.bghm =
11076 wcqe->total_data_placed;
11077 }
11078
11079 /*
11080 * Set ALL the error bits to indicate we don't know what
11081 * type of error it is.
11082 */
11083 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11084 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11085 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11086 BGS_GUARD_ERR_MASK);
11087 }
11088
11089 /* Pick up HBA exchange busy condition */
11090 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11091 spin_lock_irqsave(&phba->hbalock, iflags);
11092 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11093 spin_unlock_irqrestore(&phba->hbalock, iflags);
11094 }
11095 }
11096
11097 /**
11098 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
11099 * @phba: Pointer to HBA context object.
11100 * @wcqe: Pointer to work-queue completion queue entry.
11101 *
11102  * This routine handles an ELS work-queue completion event and constructs
11103  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
11104 * discovery engine to handle.
11105 *
11106 * Return: Pointer to the receive IOCBQ, NULL otherwise.
11107 **/
11108 static struct lpfc_iocbq *
11109 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11110 struct lpfc_iocbq *irspiocbq)
11111 {
11112 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11113 struct lpfc_iocbq *cmdiocbq;
11114 struct lpfc_wcqe_complete *wcqe;
11115 unsigned long iflags;
11116
11117 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11118 spin_lock_irqsave(&pring->ring_lock, iflags);
11119 pring->stats.iocb_event++;
11120 /* Look up the ELS command IOCB and create pseudo response IOCB */
11121 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11122 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11123 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11124
11125 if (unlikely(!cmdiocbq)) {
11126 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11127 "0386 ELS complete with no corresponding "
11128 "cmdiocb: iotag (%d)\n",
11129 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11130 lpfc_sli_release_iocbq(phba, irspiocbq);
11131 return NULL;
11132 }
11133
11134 /* Fake the irspiocbq and copy necessary response information */
11135 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11136
11137 return irspiocbq;
11138 }
11139
11140 /**
11141  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
11142  * @phba: Pointer to HBA context object.
11143  * @mcqe: Pointer to mailbox completion queue entry.
11144  *
11145  * This routine processes a mailbox completion queue entry carrying an
11146  * asynchronous event.
11147 *
11148 * Return: true if work posted to worker thread, otherwise false.
11149 **/
11150 static bool
11151 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11152 {
11153 struct lpfc_cq_event *cq_event;
11154 unsigned long iflags;
11155
11156 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11157 "0392 Async Event: word0:x%x, word1:x%x, "
11158 "word2:x%x, word3:x%x\n", mcqe->word0,
11159 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11160
11161 /* Allocate a new internal CQ_EVENT entry */
11162 cq_event = lpfc_sli4_cq_event_alloc(phba);
11163 if (!cq_event) {
11164 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11165 "0394 Failed to allocate CQ_EVENT entry\n");
11166 return false;
11167 }
11168
11169 /* Move the CQE into an asynchronous event entry */
11170 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11171 spin_lock_irqsave(&phba->hbalock, iflags);
11172 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11173 /* Set the async event flag */
11174 phba->hba_flag |= ASYNC_EVENT;
11175 spin_unlock_irqrestore(&phba->hbalock, iflags);
11176
11177 return true;
11178 }
11179
11180 /**
11181 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
11182 * @phba: Pointer to HBA context object.
11183  * @mcqe: Pointer to mailbox completion queue entry.
11184  *
11185  * This routine processes a mailbox completion queue entry with a mailbox
11186  * completion event.
11187 *
11188 * Return: true if work posted to worker thread, otherwise false.
11189 **/
11190 static bool
11191 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11192 {
11193 uint32_t mcqe_status;
11194 MAILBOX_t *mbox, *pmbox;
11195 struct lpfc_mqe *mqe;
11196 struct lpfc_vport *vport;
11197 struct lpfc_nodelist *ndlp;
11198 struct lpfc_dmabuf *mp;
11199 unsigned long iflags;
11200 LPFC_MBOXQ_t *pmb;
11201 bool workposted = false;
11202 int rc;
11203
11204 	/* Not a mailbox-completion MCQE; bail out after handling the consumed trailer */
11205 if (!bf_get(lpfc_trailer_completed, mcqe))
11206 goto out_no_mqe_complete;
11207
11208 /* Get the reference to the active mbox command */
11209 spin_lock_irqsave(&phba->hbalock, iflags);
11210 pmb = phba->sli.mbox_active;
11211 if (unlikely(!pmb)) {
11212 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11213 "1832 No pending MBOX command to handle\n");
11214 spin_unlock_irqrestore(&phba->hbalock, iflags);
11215 goto out_no_mqe_complete;
11216 }
11217 spin_unlock_irqrestore(&phba->hbalock, iflags);
11218 mqe = &pmb->u.mqe;
11219 pmbox = (MAILBOX_t *)&pmb->u.mqe;
11220 mbox = phba->mbox;
11221 vport = pmb->vport;
11222
11223 /* Reset heartbeat timer */
11224 phba->last_completion_time = jiffies;
11225 del_timer(&phba->sli.mbox_tmo);
11226
11227 /* Move mbox data to caller's mailbox region, do endian swapping */
11228 if (pmb->mbox_cmpl && mbox)
11229 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11230
11231 /*
11232 * For mcqe errors, conditionally move a modified error code to
11233 * the mbox so that the error will not be missed.
11234 */
11235 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11236 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11237 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11238 bf_set(lpfc_mqe_status, mqe,
11239 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11240 }
11241 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11242 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11243 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11244 "MBOX dflt rpi: status:x%x rpi:x%x",
11245 mcqe_status,
11246 pmbox->un.varWords[0], 0);
11247 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11248 mp = (struct lpfc_dmabuf *)(pmb->context1);
11249 ndlp = (struct lpfc_nodelist *)pmb->context2;
11250 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
11251 			 * rid of the RPI using the same mbox buffer.
11252 */
11253 lpfc_unreg_login(phba, vport->vpi,
11254 pmbox->un.varWords[0], pmb);
11255 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11256 pmb->context1 = mp;
11257 pmb->context2 = ndlp;
11258 pmb->vport = vport;
11259 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11260 if (rc != MBX_BUSY)
11261 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11262 LOG_SLI, "0385 rc should "
11263 "have been MBX_BUSY\n");
11264 if (rc != MBX_NOT_FINISHED)
11265 goto send_current_mbox;
11266 }
11267 }
11268 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11269 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11270 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11271
11272 /* There is mailbox completion work to do */
11273 spin_lock_irqsave(&phba->hbalock, iflags);
11274 __lpfc_mbox_cmpl_put(phba, pmb);
11275 phba->work_ha |= HA_MBATT;
11276 spin_unlock_irqrestore(&phba->hbalock, iflags);
11277 workposted = true;
11278
11279 send_current_mbox:
11280 spin_lock_irqsave(&phba->hbalock, iflags);
11281 /* Release the mailbox command posting token */
11282 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11283 	/* Setting the active mailbox pointer needs to be in sync with the flag clear */
11284 phba->sli.mbox_active = NULL;
11285 spin_unlock_irqrestore(&phba->hbalock, iflags);
11286 /* Wake up worker thread to post the next pending mailbox command */
11287 lpfc_worker_wake_up(phba);
11288 out_no_mqe_complete:
11289 if (bf_get(lpfc_trailer_consumed, mcqe))
11290 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11291 return workposted;
11292 }
11293
11294 /**
11295 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
11296 * @phba: Pointer to HBA context object.
11297 * @cqe: Pointer to mailbox completion queue entry.
11298 *
11299  * This routine processes a mailbox completion queue entry; it invokes the
11300  * proper mailbox completion handling or asynchronous event handling routine
11301  * according to the MCQE's async bit.
11302 *
11303 * Return: true if work posted to worker thread, otherwise false.
11304 **/
11305 static bool
11306 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11307 {
11308 struct lpfc_mcqe mcqe;
11309 bool workposted;
11310
11311 /* Copy the mailbox MCQE and convert endian order as needed */
11312 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11313
11314 /* Invoke the proper event handling routine */
11315 if (!bf_get(lpfc_trailer_async, &mcqe))
11316 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11317 else
11318 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11319 return workposted;
11320 }
11321
11322 /**
11323 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11324 * @phba: Pointer to HBA context object.
11325 * @cq: Pointer to associated CQ
11326 * @wcqe: Pointer to work-queue completion queue entry.
11327 *
11328 * This routine handles an ELS work-queue completion event.
11329 *
11330 * Return: true if work posted to worker thread, otherwise false.
11331 **/
11332 static bool
11333 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11334 struct lpfc_wcqe_complete *wcqe)
11335 {
11336 struct lpfc_iocbq *irspiocbq;
11337 unsigned long iflags;
11338 struct lpfc_sli_ring *pring = cq->pring;
11339 int txq_cnt = 0;
11340 int txcmplq_cnt = 0;
11341 int fcp_txcmplq_cnt = 0;
11342
11343 /* Get an irspiocbq for later ELS response processing use */
11344 irspiocbq = lpfc_sli_get_iocbq(phba);
11345 if (!irspiocbq) {
11346 if (!list_empty(&pring->txq))
11347 txq_cnt++;
11348 if (!list_empty(&pring->txcmplq))
11349 txcmplq_cnt++;
11350 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
11351 fcp_txcmplq_cnt++;
11352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11353 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11354 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11355 txq_cnt, phba->iocb_cnt,
11356 fcp_txcmplq_cnt,
11357 txcmplq_cnt);
11358 return false;
11359 }
11360
11361 /* Save off the slow-path queue event for work thread to process */
11362 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11363 spin_lock_irqsave(&phba->hbalock, iflags);
11364 list_add_tail(&irspiocbq->cq_event.list,
11365 &phba->sli4_hba.sp_queue_event);
11366 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11367 spin_unlock_irqrestore(&phba->hbalock, iflags);
11368
11369 return true;
11370 }
11371
11372 /**
11373 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
11374 * @phba: Pointer to HBA context object.
11375 * @wcqe: Pointer to work-queue completion queue entry.
11376 *
11377  * This routine handles a slow-path WQ entry consumed event by invoking the
11378 * proper WQ release routine to the slow-path WQ.
11379 **/
11380 static void
11381 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11382 struct lpfc_wcqe_release *wcqe)
11383 {
11384 /* sanity check on queue memory */
11385 if (unlikely(!phba->sli4_hba.els_wq))
11386 return;
11387 /* Check for the slow-path ELS work queue */
11388 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11389 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11390 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11391 else
11392 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11393 "2579 Slow-path wqe consume event carries "
11394 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11395 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11396 phba->sli4_hba.els_wq->queue_id);
11397 }
11398
11399 /**
11400  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
11401 * @phba: Pointer to HBA context object.
11402 * @cq: Pointer to a WQ completion queue.
11403 * @wcqe: Pointer to work-queue completion queue entry.
11404 *
11405 * This routine handles an XRI abort event.
11406 *
11407 * Return: true if work posted to worker thread, otherwise false.
11408 **/
11409 static bool
11410 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11411 struct lpfc_queue *cq,
11412 struct sli4_wcqe_xri_aborted *wcqe)
11413 {
11414 bool workposted = false;
11415 struct lpfc_cq_event *cq_event;
11416 unsigned long iflags;
11417
11418 /* Allocate a new internal CQ_EVENT entry */
11419 cq_event = lpfc_sli4_cq_event_alloc(phba);
11420 if (!cq_event) {
11421 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11422 "0602 Failed to allocate CQ_EVENT entry\n");
11423 return false;
11424 }
11425
11426 /* Move the CQE into the proper xri abort event list */
11427 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11428 switch (cq->subtype) {
11429 case LPFC_FCP:
11430 spin_lock_irqsave(&phba->hbalock, iflags);
11431 list_add_tail(&cq_event->list,
11432 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11433 /* Set the fcp xri abort event flag */
11434 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11435 spin_unlock_irqrestore(&phba->hbalock, iflags);
11436 workposted = true;
11437 break;
11438 case LPFC_ELS:
11439 spin_lock_irqsave(&phba->hbalock, iflags);
11440 list_add_tail(&cq_event->list,
11441 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11442 /* Set the els xri abort event flag */
11443 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11444 spin_unlock_irqrestore(&phba->hbalock, iflags);
11445 workposted = true;
11446 break;
11447 default:
11448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11449 "0603 Invalid work queue CQE subtype (x%x)\n",
11450 cq->subtype);
11451 workposted = false;
11452 break;
11453 }
11454 return workposted;
11455 }
11456
11457 /**
11458 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
11459 * @phba: Pointer to HBA context object.
11460 * @rcqe: Pointer to receive-queue completion queue entry.
11461 *
11462 * This routine processes a receive-queue completion queue entry.
11463 *
11464 * Return: true if work posted to worker thread, otherwise false.
11465 **/
11466 static bool
11467 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11468 {
11469 bool workposted = false;
11470 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
11471 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
11472 struct hbq_dmabuf *dma_buf;
11473 uint32_t status, rq_id;
11474 unsigned long iflags;
11475
11476 /* sanity check on queue memory */
11477 if (unlikely(!hrq) || unlikely(!drq))
11478 return workposted;
11479
11480 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
11481 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
11482 else
11483 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
11484 if (rq_id != hrq->queue_id)
11485 goto out;
11486
11487 status = bf_get(lpfc_rcqe_status, rcqe);
11488 switch (status) {
11489 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11490 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11491 "2537 Receive Frame Truncated!!\n");
11492 hrq->RQ_buf_trunc++;
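		/*
		 * Intentional fall through: a truncated frame is still handed
		 * up via the FC_STATUS_RQ_SUCCESS path below.
		 */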
11493 case FC_STATUS_RQ_SUCCESS:
11494 lpfc_sli4_rq_release(hrq, drq);
11495 spin_lock_irqsave(&phba->hbalock, iflags);
11496 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11497 if (!dma_buf) {
11498 hrq->RQ_no_buf_found++;
11499 spin_unlock_irqrestore(&phba->hbalock, iflags);
11500 goto out;
11501 }
11502 hrq->RQ_rcv_buf++;
11503 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11504 /* save off the frame for the worker thread to process */
11505 list_add_tail(&dma_buf->cq_event.list,
11506 &phba->sli4_hba.sp_queue_event);
11507 /* Frame received */
11508 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11509 spin_unlock_irqrestore(&phba->hbalock, iflags);
11510 workposted = true;
11511 break;
11512 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11513 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11514 hrq->RQ_no_posted_buf++;
11515 /* Post more buffers if possible */
11516 spin_lock_irqsave(&phba->hbalock, iflags);
11517 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
11518 spin_unlock_irqrestore(&phba->hbalock, iflags);
11519 workposted = true;
11520 break;
11521 }
11522 out:
11523 return workposted;
11524 }
11525
11526 /**
11527 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
11528 * @phba: Pointer to HBA context object.
11529 * @cq: Pointer to the completion queue.
11530 * @cqe: Pointer to a completion queue entry.
11531 *
11532 * This routine processes a slow-path work-queue or receive-queue completion
11533 * queue entry.
11534 *
11535 * Return: true if work posted to worker thread, otherwise false.
11536 **/
11537 static bool
11538 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11539 struct lpfc_cqe *cqe)
11540 {
11541 struct lpfc_cqe cqevt;
11542 bool workposted = false;
11543
11544 /* Copy the work queue CQE and convert endian order if needed */
11545 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
11546
11547 /* Check and process for different type of WCQE and dispatch */
11548 switch (bf_get(lpfc_cqe_code, &cqevt)) {
11549 case CQE_CODE_COMPL_WQE:
11550 /* Process the WQ/RQ complete event */
11551 phba->last_completion_time = jiffies;
11552 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11553 (struct lpfc_wcqe_complete *)&cqevt);
11554 break;
11555 case CQE_CODE_RELEASE_WQE:
11556 /* Process the WQ release event */
11557 lpfc_sli4_sp_handle_rel_wcqe(phba,
11558 (struct lpfc_wcqe_release *)&cqevt);
11559 break;
11560 case CQE_CODE_XRI_ABORTED:
11561 /* Process the WQ XRI abort event */
11562 phba->last_completion_time = jiffies;
11563 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11564 (struct sli4_wcqe_xri_aborted *)&cqevt);
11565 break;
11566 case CQE_CODE_RECEIVE:
11567 case CQE_CODE_RECEIVE_V1:
11568 /* Process the RQ event */
11569 phba->last_completion_time = jiffies;
11570 workposted = lpfc_sli4_sp_handle_rcqe(phba,
11571 (struct lpfc_rcqe *)&cqevt);
11572 break;
11573 default:
11574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11575 "0388 Not a valid WCQE code: x%x\n",
11576 bf_get(lpfc_cqe_code, &cqevt));
11577 break;
11578 }
11579 return workposted;
11580 }
11581
11582 /**
11583 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
11584 * @phba: Pointer to HBA context object.
11585 * @eqe: Pointer to the event queue entry.
11586 *
11587 * This routine processes an event queue entry from the slow-path event
11588 * queue. It checks the MajorCode and MinorCode to determine whether this is
11589 * for a completion event on a completion queue; if not, an error is logged
11590 * and the routine simply returns. Otherwise, it finds the corresponding
11591 * completion queue, processes all the entries on that completion queue,
11592 * rearms the completion queue, and then returns.
11593 *
11594 **/
11595 static void
11596 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11597 struct lpfc_queue *speq)
11598 {
11599 struct lpfc_queue *cq = NULL, *childq;
11600 struct lpfc_cqe *cqe;
11601 bool workposted = false;
11602 int ecount = 0;
11603 uint16_t cqid;
11604
11605 /* Get the reference to the corresponding CQ */
11606 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11607
11608 list_for_each_entry(childq, &speq->child_list, list) {
11609 if (childq->queue_id == cqid) {
11610 cq = childq;
11611 break;
11612 }
11613 }
11614 if (unlikely(!cq)) {
11615 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11617 "0365 Slow-path CQ identifier "
11618 "(%d) does not exist\n", cqid);
11619 return;
11620 }
11621
11622 /* Process all the entries to the CQ */
11623 switch (cq->type) {
11624 case LPFC_MCQ:
11625 while ((cqe = lpfc_sli4_cq_get(cq))) {
11626 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11627 if (!(++ecount % cq->entry_repost))
11628 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11629 cq->CQ_mbox++;
11630 }
11631 break;
11632 case LPFC_WCQ:
11633 while ((cqe = lpfc_sli4_cq_get(cq))) {
11634 if (cq->subtype == LPFC_FCP)
11635 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11636 cqe);
11637 else
11638 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11639 cqe);
11640 if (!(++ecount % cq->entry_repost))
11641 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11642 }
11643
11644 /* Track the max number of CQEs processed in 1 EQ */
11645 if (ecount > cq->CQ_max_cqe)
11646 cq->CQ_max_cqe = ecount;
11647 break;
11648 default:
11649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11650 "0370 Invalid completion queue type (%d)\n",
11651 cq->type);
11652 return;
11653 }
11654
11655 /* Catch the no cq entry condition, log an error */
11656 if (unlikely(ecount == 0))
11657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11658 "0371 No entry from the CQ: identifier "
11659 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11660
11661 /* In any case, flush and re-arm the CQ */
11662 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11663
11664 /* wake up worker thread if there are works to be done */
11665 if (workposted)
11666 lpfc_worker_wake_up(phba);
11667 }
11668
11669 /**
11670 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11671 * @phba: Pointer to HBA context object.
11672 * @cq: Pointer to associated CQ
11673 * @wcqe: Pointer to work-queue completion queue entry.
11674 *
11675 * This routine processes a fast-path work-queue completion entry from the
11676 * fast-path event queue for FCP command response completion.
11677 **/
11678 static void
11679 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11680 struct lpfc_wcqe_complete *wcqe)
11681 {
11682 struct lpfc_sli_ring *pring = cq->pring;
11683 struct lpfc_iocbq *cmdiocbq;
11684 struct lpfc_iocbq irspiocbq;
11685 unsigned long iflags;
11686
11687 /* Check for response status */
11688 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11689 /* If resource errors reported from HBA, reduce queue
11690 * depth of the SCSI device.
11691 */
11692 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11693 IOSTAT_LOCAL_REJECT)) &&
11694 ((wcqe->parameter & IOERR_PARAM_MASK) ==
11695 IOERR_NO_RESOURCES))
11696 phba->lpfc_rampdown_queue_depth(phba);
11697
11698 /* Log the error status */
11699 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11700 "0373 FCP complete error: status=x%x, "
11701 "hw_status=x%x, total_data_specified=%d, "
11702 "parameter=x%x, word3=x%x\n",
11703 bf_get(lpfc_wcqe_c_status, wcqe),
11704 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11705 wcqe->total_data_placed, wcqe->parameter,
11706 wcqe->word3);
11707 }
11708
11709 /* Look up the FCP command IOCB and create pseudo response IOCB */
11710 spin_lock_irqsave(&pring->ring_lock, iflags);
11711 pring->stats.iocb_event++;
11712 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11713 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11714 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11715 if (unlikely(!cmdiocbq)) {
11716 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11717 "0374 FCP complete with no corresponding "
11718 "cmdiocb: iotag (%d)\n",
11719 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11720 return;
11721 }
11722 if (unlikely(!cmdiocbq->iocb_cmpl)) {
11723 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11724 "0375 FCP cmdiocb not callback function "
11725 "iotag: (%d)\n",
11726 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11727 return;
11728 }
11729
11730 /* Fake the irspiocb and copy necessary response information */
11731 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
11732
11733 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11734 spin_lock_irqsave(&phba->hbalock, iflags);
11735 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11736 spin_unlock_irqrestore(&phba->hbalock, iflags);
11737 }
11738
11739 /* Pass the cmd_iocb and the rsp state to the upper layer */
11740 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11741 }
11742
11743 /**
11744 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11745 * @phba: Pointer to HBA context object.
11746 * @cq: Pointer to completion queue.
11747 * @wcqe: Pointer to work-queue completion queue entry.
11748 *
11749 * This routine handles a fast-path WQ entry consumed event by invoking the
11750 * proper WQ release routine for the matching fast-path WQ.
11751 **/
11752 static void
11753 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11754 struct lpfc_wcqe_release *wcqe)
11755 {
11756 struct lpfc_queue *childwq;
11757 bool wqid_matched = false;
11758 uint16_t fcp_wqid;
11759
11760 /* Check for fast-path FCP work queue release */
11761 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11762 list_for_each_entry(childwq, &cq->child_list, list) {
11763 if (childwq->queue_id == fcp_wqid) {
11764 lpfc_sli4_wq_release(childwq,
11765 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11766 wqid_matched = true;
11767 break;
11768 }
11769 }
11770 /* Report warning log message if no match found */
11771 if (wqid_matched != true)
11772 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11773 "2580 Fast-path wqe consume event carries "
11774 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
11775 }
11776
11777 /**
11778 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11779 * @cq: Pointer to the completion queue.
11780 * @cqe: Pointer to the fast-path completion queue entry.
11781 *
11782 * This routine processes a fast-path work-queue completion entry from the
11783 * fast-path event queue for FCP command response completion.
11784 **/
11785 static int
11786 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11787 struct lpfc_cqe *cqe)
11788 {
11789 struct lpfc_wcqe_release wcqe;
11790 bool workposted = false;
11791
11792 /* Copy the work queue CQE and convert endian order if needed */
11793 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11794
11795 /* Check and process for different type of WCQE and dispatch */
11796 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11797 case CQE_CODE_COMPL_WQE:
11798 cq->CQ_wq++;
11799 /* Process the WQ complete event */
11800 phba->last_completion_time = jiffies;
11801 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
11802 (struct lpfc_wcqe_complete *)&wcqe);
11803 break;
11804 case CQE_CODE_RELEASE_WQE:
11805 cq->CQ_release_wqe++;
11806 /* Process the WQ release event */
11807 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11808 (struct lpfc_wcqe_release *)&wcqe);
11809 break;
11810 case CQE_CODE_XRI_ABORTED:
11811 cq->CQ_xri_aborted++;
11812 /* Process the WQ XRI abort event */
11813 phba->last_completion_time = jiffies;
11814 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11815 (struct sli4_wcqe_xri_aborted *)&wcqe);
11816 break;
11817 default:
11818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11819 "0144 Not a valid WCQE code: x%x\n",
11820 bf_get(lpfc_wcqe_c_code, &wcqe));
11821 break;
11822 }
11823 return workposted;
11824 }
11825
11826 /**
11827 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11828 * @phba: Pointer to HBA context object.
11829 * @eqe: Pointer to fast-path event queue entry.
11830 *
11831 * This routine processes an event queue entry from the fast-path event
11832 * queue. It checks the MajorCode and MinorCode to determine whether this is
11833 * for a completion event on a completion queue; if not, an error is logged
11834 * and the routine simply returns. Otherwise, it finds the corresponding
11835 * completion queue, processes all the entries on that completion queue,
11836 * rearms the completion queue, and then returns.
11837 **/
11838 static void
11839 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11840 uint32_t qidx)
11841 {
11842 struct lpfc_queue *cq;
11843 struct lpfc_cqe *cqe;
11844 bool workposted = false;
11845 uint16_t cqid;
11846 int ecount = 0;
11847
11848 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11849 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11850 "0366 Not a valid completion "
11851 "event: majorcode=x%x, minorcode=x%x\n",
11852 bf_get_le32(lpfc_eqe_major_code, eqe),
11853 bf_get_le32(lpfc_eqe_minor_code, eqe));
11854 return;
11855 }
11856
11857 /* Get the reference to the corresponding CQ */
11858 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11859
11860 /* Check if this is a Slow path event */
11861 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11862 lpfc_sli4_sp_handle_eqe(phba, eqe,
11863 phba->sli4_hba.hba_eq[qidx]);
11864 return;
11865 }
11866
11867 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11868 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11869 "3146 Fast-path completion queues "
11870 "does not exist\n");
11871 return;
11872 }
11873 cq = phba->sli4_hba.fcp_cq[qidx];
11874 if (unlikely(!cq)) {
11875 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11876 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11877 "0367 Fast-path completion queue "
11878 "(%d) does not exist\n", qidx);
11879 return;
11880 }
11881
11882 if (unlikely(cqid != cq->queue_id)) {
11883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11884 "0368 Miss-matched fast-path completion "
11885 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11886 cqid, cq->queue_id);
11887 return;
11888 }
11889
11890 /* Process all the entries to the CQ */
11891 while ((cqe = lpfc_sli4_cq_get(cq))) {
11892 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11893 if (!(++ecount % cq->entry_repost))
11894 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11895 }
11896
11897 /* Track the max number of CQEs processed in 1 EQ */
11898 if (ecount > cq->CQ_max_cqe)
11899 cq->CQ_max_cqe = ecount;
11900
11901 /* Catch the no cq entry condition */
11902 if (unlikely(ecount == 0))
11903 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11904 "0369 No entry from fast-path completion "
11905 "queue fcpcqid=%d\n", cq->queue_id);
11906
11907 /* In any case, flush and re-arm the CQ */
11908 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11909
11910 /* wake up worker thread if there are works to be done */
11911 if (workposted)
11912 lpfc_worker_wake_up(phba);
11913 }
11914
11915 static void
11916 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11917 {
11918 struct lpfc_eqe *eqe;
11919
11920 /* walk all the EQ entries and drop on the floor */
11921 while ((eqe = lpfc_sli4_eq_get(eq)))
11922 ;
11923
11924 /* Clear and re-arm the EQ */
11925 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11926 }
11927
11928 /**
11929 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11930 * @irq: Interrupt number.
11931 * @dev_id: The device context pointer.
11932 *
11933 * This function is directly called from the PCI layer as an interrupt
11934 * service routine when device with SLI-4 interface spec is enabled with
11935 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11936 * ring event in the HBA. However, when the device is enabled with either
11937 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11938 * device-level interrupt handler. When the PCI slot is in error recovery
11939 * or the HBA is undergoing initialization, the interrupt handler will not
11940 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11941 * the interrupt context. This function is called without any lock held.
11942 * It gets the hbalock to access and update SLI data structures. Note that
11943 * the FCP EQs map one-to-one to the FCP CQs, so the FCP EQ index is equal
11944 * to the corresponding FCP CQ index.
11945 *
11946 * The link attention and ELS ring attention events are handled
11947 * by the worker thread. The interrupt handler signals the worker thread
11948 * and returns for these events. This function is called without any lock
11949 * held. It gets the hbalock to access and update SLI data structures.
11950 *
11951 * This function returns IRQ_HANDLED when interrupt is handled else it
11952 * returns IRQ_NONE.
11953 **/
11954 irqreturn_t
11955 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11956 {
11957 struct lpfc_hba *phba;
11958 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11959 struct lpfc_queue *fpeq;
11960 struct lpfc_eqe *eqe;
11961 unsigned long iflag;
11962 int ecount = 0;
11963 int fcp_eqidx;
11964
11965 /* Get the driver's phba structure from the dev_id */
11966 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11967 phba = fcp_eq_hdl->phba;
11968 fcp_eqidx = fcp_eq_hdl->idx;
11969
11970 if (unlikely(!phba))
11971 return IRQ_NONE;
11972 if (unlikely(!phba->sli4_hba.hba_eq))
11973 return IRQ_NONE;
11974
11975 /* Get to the EQ struct associated with this vector */
11976 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11977 if (unlikely(!fpeq))
11978 return IRQ_NONE;
11979
11980 if (lpfc_fcp_look_ahead) {
11981 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11982 lpfc_sli4_eq_clr_intr(fpeq);
11983 else {
11984 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11985 return IRQ_NONE;
11986 }
11987 }
11988
11989 /* Check device state for handling interrupt */
11990 if (unlikely(lpfc_intr_state_check(phba))) {
11991 fpeq->EQ_badstate++;
11992 /* Check again for link_state with lock held */
11993 spin_lock_irqsave(&phba->hbalock, iflag);
11994 if (phba->link_state < LPFC_LINK_DOWN)
11995 /* Flush, clear interrupt, and rearm the EQ */
11996 lpfc_sli4_eq_flush(phba, fpeq);
11997 spin_unlock_irqrestore(&phba->hbalock, iflag);
11998 if (lpfc_fcp_look_ahead)
11999 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12000 return IRQ_NONE;
12001 }
12002
12003 /*
12004 * Process all the events on the FCP fast-path EQ
12005 */
12006 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12007 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12008 if (!(++ecount % fpeq->entry_repost))
12009 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
12010 fpeq->EQ_processed++;
12011 }
12012
12013 /* Track the max number of EQEs processed in 1 intr */
12014 if (ecount > fpeq->EQ_max_eqe)
12015 fpeq->EQ_max_eqe = ecount;
12016
12017 /* Always clear and re-arm the fast-path EQ */
12018 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
12019
12020 if (unlikely(ecount == 0)) {
12021 fpeq->EQ_no_entry++;
12022
12023 if (lpfc_fcp_look_ahead) {
12024 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12025 return IRQ_NONE;
12026 }
12027
12028 if (phba->intr_type == MSIX)
12029 /* MSI-X treated interrupt served as no EQ share INT */
12030 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12031 "0358 MSI-X interrupt with no EQE\n");
12032 else
12033 /* Non MSI-X treated on interrupt as EQ share INT */
12034 return IRQ_NONE;
12035 }
12036
12037 if (lpfc_fcp_look_ahead)
12038 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12039 return IRQ_HANDLED;
12040 } /* lpfc_sli4_hba_intr_handler */
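/*
 * Registration sketch (illustrative only, not the driver's exact init code):
 * each MSI-X vector is typically registered with this handler and a per-EQ
 * fcp_eq_hdl entry as its dev_id, e.g.
 *
 *	rc = request_irq(vector, lpfc_sli4_hba_intr_handler, 0,
 *			 "lpfc-fcp", &phba->sli4_hba.fcp_eq_hdl[idx]);
 *
 * so that the dev_id cast at the top of the handler resolves back to the
 * per-EQ handle; "vector" and "idx" above are placeholders.
 */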
12041
12042 /**
12043 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
12044 * @irq: Interrupt number.
12045 * @dev_id: The device context pointer.
12046 *
12047 * This function is the device-level interrupt handler to device with SLI-4
12048 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
12049 * interrupt mode is enabled and there is an event in the HBA which requires
12050 * driver attention. This function invokes the slow-path interrupt attention
12051 * handling function and fast-path interrupt attention handling function in
12052 * turn to process the relevant HBA attention events. This function is called
12053 * without any lock held. It gets the hbalock to access and update SLI data
12054 * structures.
12055 *
12056 * This function returns IRQ_HANDLED when interrupt is handled, else it
12057 * returns IRQ_NONE.
12058 **/
12059 irqreturn_t
12060 lpfc_sli4_intr_handler(int irq, void *dev_id)
12061 {
12062 struct lpfc_hba *phba;
12063 irqreturn_t hba_irq_rc;
12064 bool hba_handled = false;
12065 int fcp_eqidx;
12066
12067 /* Get the driver's phba structure from the dev_id */
12068 phba = (struct lpfc_hba *)dev_id;
12069
12070 if (unlikely(!phba))
12071 return IRQ_NONE;
12072
12073 /*
12074 * Invoke fast-path host attention interrupt handling as appropriate.
12075 */
12076 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12077 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12078 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12079 if (hba_irq_rc == IRQ_HANDLED)
12080 hba_handled |= true;
12081 }
12082
12083 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12084 } /* lpfc_sli4_intr_handler */
12085
12086 /**
12087 * lpfc_sli4_queue_free - free a queue structure and associated memory
12088 * @queue: The queue structure to free.
12089 *
12090 * This function frees a queue structure and the DMAable memory used for
12091 * the host resident queue. This function must be called after destroying the
12092 * queue on the HBA.
12093 **/
12094 void
12095 lpfc_sli4_queue_free(struct lpfc_queue *queue)
12096 {
12097 struct lpfc_dmabuf *dmabuf;
12098
12099 if (!queue)
12100 return;
12101
12102 while (!list_empty(&queue->page_list)) {
12103 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12104 list);
12105 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12106 dmabuf->virt, dmabuf->phys);
12107 kfree(dmabuf);
12108 }
12109 kfree(queue);
12110 return;
12111 }
12112
12113 /**
12114 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
12115 * @phba: The HBA that this queue is being created on.
12116 * @entry_size: The size of each queue entry for this queue.
12117 * @entry_count: The number of entries that this queue will handle.
12118 *
12119 * This function allocates a queue structure and the DMAable memory used for
12120 * the host resident queue. This function must be called before creating the
12121 * queue on the HBA.
12122 **/
12123 struct lpfc_queue *
12124 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
12125 uint32_t entry_count)
12126 {
12127 struct lpfc_queue *queue;
12128 struct lpfc_dmabuf *dmabuf;
12129 int x, total_qe_count;
12130 void *dma_pointer;
12131 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12132
12133 if (!phba->sli4_hba.pc_sli4_params.supported)
12134 hw_page_size = SLI4_PAGE_SIZE;
12135
12136 queue = kzalloc(sizeof(struct lpfc_queue) +
12137 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
12138 if (!queue)
12139 return NULL;
12140 queue->page_count = (ALIGN(entry_size * entry_count,
12141 hw_page_size))/hw_page_size;
12142 INIT_LIST_HEAD(&queue->list);
12143 INIT_LIST_HEAD(&queue->page_list);
12144 INIT_LIST_HEAD(&queue->child_list);
12145 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
12146 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
12147 if (!dmabuf)
12148 goto out_fail;
12149 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
12150 hw_page_size, &dmabuf->phys,
12151 GFP_KERNEL);
12152 if (!dmabuf->virt) {
12153 kfree(dmabuf);
12154 goto out_fail;
12155 }
12156 memset(dmabuf->virt, 0, hw_page_size);
12157 dmabuf->buffer_tag = x;
12158 list_add_tail(&dmabuf->list, &queue->page_list);
12159 /* initialize queue's entry array */
12160 dma_pointer = dmabuf->virt;
12161 for (; total_qe_count < entry_count &&
12162 dma_pointer < (hw_page_size + dmabuf->virt);
12163 total_qe_count++, dma_pointer += entry_size) {
12164 queue->qe[total_qe_count].address = dma_pointer;
12165 }
12166 }
12167 queue->entry_size = entry_size;
12168 queue->entry_count = entry_count;
12169
12170 /*
12171 * entry_repost is calculated based on the number of entries in the
12172 * queue. This works out except for RQs. If buffers are NOT initially
12173 * posted for every RQE, entry_repost should be adjusted accordingly.
12174 */
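	/*
	 * Worked example (illustrative numbers): with entry_count = 1024 the
	 * default below is entry_repost = 1024 >> 3 = 128, i.e. consumed
	 * entries are released back to the HBA in batches of roughly 128
	 * (see the "% entry_repost" checks in the CQ/EQ processing loops).
	 */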
12175 queue->entry_repost = (entry_count >> 3);
12176 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
12177 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
12178 queue->phba = phba;
12179
12180 return queue;
12181 out_fail:
12182 lpfc_sli4_queue_free(queue);
12183 return NULL;
12184 }
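/*
 * Usage sketch (illustrative only; the 4-byte entry size and 1024-entry
 * count are hypothetical and error handling is elided). A caller pairs
 * lpfc_sli4_queue_alloc() with the matching create routine and, on teardown,
 * lpfc_sli4_queue_free():
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, 4, 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *	...
 *	lpfc_sli4_queue_free(eq);
 *
 * With those numbers and a 4KB SLI4 page, page_count above works out to
 * ALIGN(4 * 1024, 4096) / 4096 = 1.
 */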
12185
12186 /**
12187 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
12188 * @phba: HBA structure that indicates port to create a queue on.
12189 * @pci_barset: PCI BAR set flag.
12190 *
12191 * This function shall perform iomap of the specified PCI BAR address to host
12192 * memory address if not already done so and return it. The returned host
12193 * memory address can be NULL.
12194 */
12195 static void __iomem *
12196 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12197 {
12198 struct pci_dev *pdev;
12199 unsigned long bar_map, bar_map_len;
12200
12201 if (!phba->pcidev)
12202 return NULL;
12203 else
12204 pdev = phba->pcidev;
12205
12206 switch (pci_barset) {
12207 case WQ_PCI_BAR_0_AND_1:
12208 if (!phba->pci_bar0_memmap_p) {
12209 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
12210 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
12211 phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
12212 }
12213 return phba->pci_bar0_memmap_p;
12214 case WQ_PCI_BAR_2_AND_3:
12215 if (!phba->pci_bar2_memmap_p) {
12216 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
12217 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
12218 phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
12219 }
12220 return phba->pci_bar2_memmap_p;
12221 case WQ_PCI_BAR_4_AND_5:
12222 if (!phba->pci_bar4_memmap_p) {
12223 bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
12224 bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
12225 phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
12226 }
12227 return phba->pci_bar4_memmap_p;
12228 default:
12229 break;
12230 }
12231 return NULL;
12232 }
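/*
 * For example (mirroring the WQ create path below): when a WQ_CREATE
 * response reports a dual-chute BAR set such as WQ_PCI_BAR_4_AND_5, the
 * caller maps that BAR through this routine and derives the queue doorbell
 * address roughly as:
 *
 *	bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
 *	wq->db_regaddr = bar_memmap_p + db_offset;
 *
 * where pci_barset and db_offset are both taken from the mailbox response.
 */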
12233
12234 /**
12235 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
12236 * @phba: HBA structure that indicates port to create a queue on.
12237 * @startq: The starting FCP EQ to modify
12238 *
12239 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
12240 *
12241 * The @phba struct is used to send mailbox command to HBA. The @startq
12242 * is used to get the starting FCP EQ to change.
12243 * This function waits for the mailbox command to finish before
12244 * continuing.
12245 *
12246 * On success this function will return a zero. If unable to allocate enough
12247 * memory this function will return -ENOMEM. If the queue create mailbox command
12248 * fails this function will return -ENXIO.
12249 **/
12250 uint32_t
12251 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12252 {
12253 struct lpfc_mbx_modify_eq_delay *eq_delay;
12254 LPFC_MBOXQ_t *mbox;
12255 struct lpfc_queue *eq;
12256 int cnt, rc, length, status = 0;
12257 uint32_t shdr_status, shdr_add_status;
12258 uint32_t result;
12259 int fcp_eqidx;
12260 union lpfc_sli4_cfg_shdr *shdr;
12261 uint16_t dmult;
12262
12263 if (startq >= phba->cfg_fcp_io_channel)
12264 return 0;
12265
12266 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12267 if (!mbox)
12268 return -ENOMEM;
12269 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12270 sizeof(struct lpfc_sli4_cfg_mhdr));
12271 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12272 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12273 length, LPFC_SLI4_MBX_EMBED);
12274 eq_delay = &mbox->u.mqe.un.eq_delay;
12275
12276 /* Calculate delay multiplier from maximum interrupts per second */
12277 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12278 if (result > LPFC_DMULT_CONST)
12279 dmult = 0;
12280 else
12281 dmult = LPFC_DMULT_CONST/result - 1;
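	/*
	 * Illustration (symbolic; the actual constant lives in the headers):
	 * the per-EQ interrupt budget is cfg_fcp_imax spread evenly over
	 * cfg_fcp_io_channel EQs, so dmult + 1 = LPFC_DMULT_CONST / result.
	 * Halving the per-EQ budget roughly doubles the delay multiplier,
	 * while a budget at or above LPFC_DMULT_CONST yields dmult = 0
	 * (no additional coalescing delay).
	 */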
12282
12283 cnt = 0;
12284 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12285 fcp_eqidx++) {
12286 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12287 if (!eq)
12288 continue;
12289 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12290 eq_delay->u.request.eq[cnt].phase = 0;
12291 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12292 cnt++;
12293 if (cnt >= LPFC_MAX_EQ_DELAY)
12294 break;
12295 }
12296 eq_delay->u.request.num_eq = cnt;
12297
12298 mbox->vport = phba->pport;
12299 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12300 mbox->context1 = NULL;
12301 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12302 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12303 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12304 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12305 if (shdr_status || shdr_add_status || rc) {
12306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12307 "2512 MODIFY_EQ_DELAY mailbox failed with "
12308 "status x%x add_status x%x, mbx status x%x\n",
12309 shdr_status, shdr_add_status, rc);
12310 status = -ENXIO;
12311 }
12312 mempool_free(mbox, phba->mbox_mem_pool);
12313 return status;
12314 }
12315
12316 /**
12317 * lpfc_eq_create - Create an Event Queue on the HBA
12318 * @phba: HBA structure that indicates port to create a queue on.
12319 * @eq: The queue structure to use to create the event queue.
12320 * @imax: The maximum interrupt per second limit.
12321 *
12322 * This function creates an event queue, as detailed in @eq, on a port,
12323 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
12324 *
12325 * The @phba struct is used to send mailbox command to HBA. The @eq struct
12326 * is used to get the entry count and entry size that are necessary to
12327 * determine the number of pages to allocate and use for this queue. This
12328 * function will send the EQ_CREATE mailbox command to the HBA to setup the
12329 * event queue. This function waits for the mailbox command to finish
12330 * before continuing.
12331 *
12332 * On success this function will return a zero. If unable to allocate enough
12333 * memory this function will return -ENOMEM. If the queue create mailbox command
12334 * fails this function will return -ENXIO.
12335 **/
12336 uint32_t
12337 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12338 {
12339 struct lpfc_mbx_eq_create *eq_create;
12340 LPFC_MBOXQ_t *mbox;
12341 int rc, length, status = 0;
12342 struct lpfc_dmabuf *dmabuf;
12343 uint32_t shdr_status, shdr_add_status;
12344 union lpfc_sli4_cfg_shdr *shdr;
12345 uint16_t dmult;
12346 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12347
12348 /* sanity check on queue memory */
12349 if (!eq)
12350 return -ENODEV;
12351 if (!phba->sli4_hba.pc_sli4_params.supported)
12352 hw_page_size = SLI4_PAGE_SIZE;
12353
12354 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12355 if (!mbox)
12356 return -ENOMEM;
12357 length = (sizeof(struct lpfc_mbx_eq_create) -
12358 sizeof(struct lpfc_sli4_cfg_mhdr));
12359 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12360 LPFC_MBOX_OPCODE_EQ_CREATE,
12361 length, LPFC_SLI4_MBX_EMBED);
12362 eq_create = &mbox->u.mqe.un.eq_create;
12363 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
12364 eq->page_count);
12365 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
12366 LPFC_EQE_SIZE);
12367 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
12368 /* Calculate delay multiplier from maximum interrupts per second */
12369 if (imax > LPFC_DMULT_CONST)
12370 dmult = 0;
12371 else
12372 dmult = LPFC_DMULT_CONST/imax - 1;
12373 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
12374 dmult);
12375 switch (eq->entry_count) {
12376 default:
12377 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12378 "0360 Unsupported EQ count. (%d)\n",
12379 eq->entry_count);
12380 if (eq->entry_count < 256)
12381 return -EINVAL;
12382 /* otherwise default to smallest count (drop through) */
12383 case 256:
12384 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12385 LPFC_EQ_CNT_256);
12386 break;
12387 case 512:
12388 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12389 LPFC_EQ_CNT_512);
12390 break;
12391 case 1024:
12392 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12393 LPFC_EQ_CNT_1024);
12394 break;
12395 case 2048:
12396 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12397 LPFC_EQ_CNT_2048);
12398 break;
12399 case 4096:
12400 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
12401 LPFC_EQ_CNT_4096);
12402 break;
12403 }
12404 list_for_each_entry(dmabuf, &eq->page_list, list) {
12405 memset(dmabuf->virt, 0, hw_page_size);
12406 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12407 putPaddrLow(dmabuf->phys);
12408 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12409 putPaddrHigh(dmabuf->phys);
12410 }
12411 mbox->vport = phba->pport;
12412 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12413 mbox->context1 = NULL;
12414 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12415 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
12416 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12417 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12418 if (shdr_status || shdr_add_status || rc) {
12419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12420 "2500 EQ_CREATE mailbox failed with "
12421 "status x%x add_status x%x, mbx status x%x\n",
12422 shdr_status, shdr_add_status, rc);
12423 status = -ENXIO;
12424 }
12425 eq->type = LPFC_EQ;
12426 eq->subtype = LPFC_NONE;
12427 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
12428 if (eq->queue_id == 0xFFFF)
12429 status = -ENXIO;
12430 eq->host_index = 0;
12431 eq->hba_index = 0;
12432
12433 mempool_free(mbox, phba->mbox_mem_pool);
12434 return status;
12435 }
12436
12437 /**
12438 * lpfc_cq_create - Create a Completion Queue on the HBA
12439 * @phba: HBA structure that indicates port to create a queue on.
12440 * @cq: The queue structure to use to create the completion queue.
12441 * @eq: The event queue to bind this completion queue to.
12442 *
12443 * This function creates a completion queue, as detailed in @cq, on a port,
12444 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
12445 *
12446 * The @phba struct is used to send mailbox command to HBA. The @cq struct
12447 * is used to get the entry count and entry size that are necessary to
12448 * determine the number of pages to allocate and use for this queue. The @eq
12449 * is used to indicate which event queue to bind this completion queue to. This
12450 * function will send the CQ_CREATE mailbox command to the HBA to setup the
12451 * completion queue. This function waits for the mailbox command to finish
12452 * before continuing.
12453 *
12454 * On success this function will return a zero. If unable to allocate enough
12455 * memory this function will return -ENOMEM. If the queue create mailbox command
12456 * fails this function will return -ENXIO.
12457 **/
12458 uint32_t
12459 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
12460 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
12461 {
12462 struct lpfc_mbx_cq_create *cq_create;
12463 struct lpfc_dmabuf *dmabuf;
12464 LPFC_MBOXQ_t *mbox;
12465 int rc, length, status = 0;
12466 uint32_t shdr_status, shdr_add_status;
12467 union lpfc_sli4_cfg_shdr *shdr;
12468 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12469
12470 /* sanity check on queue memory */
12471 if (!cq || !eq)
12472 return -ENODEV;
12473 if (!phba->sli4_hba.pc_sli4_params.supported)
12474 hw_page_size = SLI4_PAGE_SIZE;
12475
12476 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12477 if (!mbox)
12478 return -ENOMEM;
12479 length = (sizeof(struct lpfc_mbx_cq_create) -
12480 sizeof(struct lpfc_sli4_cfg_mhdr));
12481 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12482 LPFC_MBOX_OPCODE_CQ_CREATE,
12483 length, LPFC_SLI4_MBX_EMBED);
12484 cq_create = &mbox->u.mqe.un.cq_create;
12485 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
12486 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
12487 cq->page_count);
12488 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
12489 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
12490 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12491 phba->sli4_hba.pc_sli4_params.cqv);
12492 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
12493 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
12494 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
12495 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
12496 eq->queue_id);
12497 } else {
12498 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
12499 eq->queue_id);
12500 }
12501 switch (cq->entry_count) {
12502 default:
12503 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12504 "0361 Unsupported CQ count. (%d)\n",
12505 cq->entry_count);
12506 if (cq->entry_count < 256) {
12507 status = -EINVAL;
12508 goto out;
12509 }
12510 /* otherwise default to smallest count (drop through) */
12511 case 256:
12512 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12513 LPFC_CQ_CNT_256);
12514 break;
12515 case 512:
12516 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12517 LPFC_CQ_CNT_512);
12518 break;
12519 case 1024:
12520 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
12521 LPFC_CQ_CNT_1024);
12522 break;
12523 }
12524 list_for_each_entry(dmabuf, &cq->page_list, list) {
12525 memset(dmabuf->virt, 0, hw_page_size);
12526 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12527 putPaddrLow(dmabuf->phys);
12528 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12529 putPaddrHigh(dmabuf->phys);
12530 }
12531 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12532
12533 /* The IOCTL status is embedded in the mailbox subheader. */
12534 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12535 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12536 if (shdr_status || shdr_add_status || rc) {
12537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12538 "2501 CQ_CREATE mailbox failed with "
12539 "status x%x add_status x%x, mbx status x%x\n",
12540 shdr_status, shdr_add_status, rc);
12541 status = -ENXIO;
12542 goto out;
12543 }
12544 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12545 if (cq->queue_id == 0xFFFF) {
12546 status = -ENXIO;
12547 goto out;
12548 }
12549 /* link the cq onto the parent eq child list */
12550 list_add_tail(&cq->list, &eq->child_list);
12551 /* Set up completion queue's type and subtype */
12552 cq->type = type;
12553 cq->subtype = subtype;
12554 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
12555 cq->assoc_qid = eq->queue_id;
12556 cq->host_index = 0;
12557 cq->hba_index = 0;
12558
12559 out:
12560 mempool_free(mbox, phba->mbox_mem_pool);
12561 return status;
12562 }
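/*
 * Usage sketch (illustrative; "idx" is a placeholder and the queue pointers
 * are assumed to have been allocated with lpfc_sli4_queue_alloc()). A
 * fast-path FCP completion queue is bound to its event queue roughly as:
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[idx],
 *			    phba->sli4_hba.hba_eq[idx], LPFC_WCQ, LPFC_FCP);
 *
 * which, on success, also links the CQ onto the EQ's child_list.
 */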
12563
12564 /**
12565 * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
12566 * @phba: HBA structure that indicates port to create a queue on.
12567 * @mq: The queue structure to use to create the mailbox queue.
12568 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
12569 * @cq: The completion queue to associate with this mailbox queue.
12570 *
12571 * This function provides failback (fb) functionality when the
12572 * mq_create_ext fails on older FW generations. It's purpose is identical
12573 * to mq_create_ext otherwise.
12574 *
12575 * This routine cannot fail as all attributes were previously accessed and
12576 * initialized in mq_create_ext.
12577 **/
12578 static void
12579 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
12580 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
12581 {
12582 struct lpfc_mbx_mq_create *mq_create;
12583 struct lpfc_dmabuf *dmabuf;
12584 int length;
12585
12586 length = (sizeof(struct lpfc_mbx_mq_create) -
12587 sizeof(struct lpfc_sli4_cfg_mhdr));
12588 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12589 LPFC_MBOX_OPCODE_MQ_CREATE,
12590 length, LPFC_SLI4_MBX_EMBED);
12591 mq_create = &mbox->u.mqe.un.mq_create;
12592 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
12593 mq->page_count);
12594 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
12595 cq->queue_id);
12596 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
12597 switch (mq->entry_count) {
12598 case 16:
12599 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12600 LPFC_MQ_RING_SIZE_16);
12601 break;
12602 case 32:
12603 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12604 LPFC_MQ_RING_SIZE_32);
12605 break;
12606 case 64:
12607 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12608 LPFC_MQ_RING_SIZE_64);
12609 break;
12610 case 128:
12611 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
12612 LPFC_MQ_RING_SIZE_128);
12613 break;
12614 }
12615 list_for_each_entry(dmabuf, &mq->page_list, list) {
12616 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12617 putPaddrLow(dmabuf->phys);
12618 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12619 putPaddrHigh(dmabuf->phys);
12620 }
12621 }
12622
12623 /**
12624 * lpfc_mq_create - Create a mailbox Queue on the HBA
12625 * @phba: HBA structure that indicates port to create a queue on.
12626 * @mq: The queue structure to use to create the mailbox queue.
12627 * @cq: The completion queue to associate with this mailbox queue.
12628 * @subtype: The queue's subtype.
12629 *
12630 * This function creates a mailbox queue, as detailed in @mq, on a port,
12631 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
12632 *
12633 * The @phba struct is used to send mailbox command to HBA. The @mq struct
12634 * is used to get the entry count and entry size that are necessary to
12635 * determine the number of pages to allocate and use for this queue. This
12636 * function will send the MQ_CREATE mailbox command to the HBA to setup the
12637 * mailbox queue. This function waits for the mailbox command to finish
12638 * before continuing.
12639 *
12640 * On success this function will return a zero. If unable to allocate enough
12641 * memory this function will return -ENOMEM. If the queue create mailbox command
12642 * fails this function will return -ENXIO.
12643 **/
12644 int32_t
12645 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
12646 struct lpfc_queue *cq, uint32_t subtype)
12647 {
12648 struct lpfc_mbx_mq_create *mq_create;
12649 struct lpfc_mbx_mq_create_ext *mq_create_ext;
12650 struct lpfc_dmabuf *dmabuf;
12651 LPFC_MBOXQ_t *mbox;
12652 int rc, length, status = 0;
12653 uint32_t shdr_status, shdr_add_status;
12654 union lpfc_sli4_cfg_shdr *shdr;
12655 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12656
12657 /* sanity check on queue memory */
12658 if (!mq || !cq)
12659 return -ENODEV;
12660 if (!phba->sli4_hba.pc_sli4_params.supported)
12661 hw_page_size = SLI4_PAGE_SIZE;
12662
12663 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12664 if (!mbox)
12665 return -ENOMEM;
12666 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
12667 sizeof(struct lpfc_sli4_cfg_mhdr));
12668 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12669 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
12670 length, LPFC_SLI4_MBX_EMBED);
12671
12672 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
12673 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
12674 bf_set(lpfc_mbx_mq_create_ext_num_pages,
12675 &mq_create_ext->u.request, mq->page_count);
12676 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
12677 &mq_create_ext->u.request, 1);
12678 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
12679 &mq_create_ext->u.request, 1);
12680 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
12681 &mq_create_ext->u.request, 1);
12682 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
12683 &mq_create_ext->u.request, 1);
12684 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
12685 &mq_create_ext->u.request, 1);
12686 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
12687 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12688 phba->sli4_hba.pc_sli4_params.mqv);
12689 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
12690 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
12691 cq->queue_id);
12692 else
12693 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
12694 cq->queue_id);
12695 switch (mq->entry_count) {
12696 default:
12697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12698 "0362 Unsupported MQ count. (%d)\n",
12699 mq->entry_count);
12700 if (mq->entry_count < 16) {
12701 status = -EINVAL;
12702 goto out;
12703 }
12704 /* otherwise default to smallest count (drop through) */
12705 case 16:
12706 bf_set(lpfc_mq_context_ring_size,
12707 &mq_create_ext->u.request.context,
12708 LPFC_MQ_RING_SIZE_16);
12709 break;
12710 case 32:
12711 bf_set(lpfc_mq_context_ring_size,
12712 &mq_create_ext->u.request.context,
12713 LPFC_MQ_RING_SIZE_32);
12714 break;
12715 case 64:
12716 bf_set(lpfc_mq_context_ring_size,
12717 &mq_create_ext->u.request.context,
12718 LPFC_MQ_RING_SIZE_64);
12719 break;
12720 case 128:
12721 bf_set(lpfc_mq_context_ring_size,
12722 &mq_create_ext->u.request.context,
12723 LPFC_MQ_RING_SIZE_128);
12724 break;
12725 }
12726 list_for_each_entry(dmabuf, &mq->page_list, list) {
12727 memset(dmabuf->virt, 0, hw_page_size);
12728 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
12729 putPaddrLow(dmabuf->phys);
12730 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
12731 putPaddrHigh(dmabuf->phys);
12732 }
12733 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12734 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12735 &mq_create_ext->u.response);
12736 if (rc != MBX_SUCCESS) {
12737 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12738 "2795 MQ_CREATE_EXT failed with "
12739 "status x%x. Failback to MQ_CREATE.\n",
12740 rc);
12741 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12742 mq_create = &mbox->u.mqe.un.mq_create;
12743 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12744 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12745 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12746 &mq_create->u.response);
12747 }
12748
12749 /* The IOCTL status is embedded in the mailbox subheader. */
12750 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12751 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12752 if (shdr_status || shdr_add_status || rc) {
12753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12754 "2502 MQ_CREATE mailbox failed with "
12755 "status x%x add_status x%x, mbx status x%x\n",
12756 shdr_status, shdr_add_status, rc);
12757 status = -ENXIO;
12758 goto out;
12759 }
12760 if (mq->queue_id == 0xFFFF) {
12761 status = -ENXIO;
12762 goto out;
12763 }
12764 mq->type = LPFC_MQ;
12765 mq->assoc_qid = cq->queue_id;
12766 mq->subtype = subtype;
12767 mq->host_index = 0;
12768 mq->hba_index = 0;
12769
12770 /* link the mq onto the parent cq child list */
12771 list_add_tail(&mq->list, &cq->child_list);
12772 out:
12773 mempool_free(mbox, phba->mbox_mem_pool);
12774 return status;
12775 }
12776
12777 /**
12778 * lpfc_wq_create - Create a Work Queue on the HBA
12779 * @phba: HBA structure that indicates port to create a queue on.
12780 * @wq: The queue structure to use to create the work queue.
12781 * @cq: The completion queue to bind this work queue to.
12782 * @subtype: The subtype of the work queue indicating its functionality.
12783 *
12784 * This function creates a work queue, as detailed in @wq, on a port, described
12785 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12786 *
12787 * The @phba struct is used to send mailbox command to HBA. The @wq struct
12788 * is used to get the entry count and entry size that are necessary to
12789 * determine the number of pages to allocate and use for this queue. The @cq
12790 * is used to indicate which completion queue to bind this work queue to. This
12791 * function will send the WQ_CREATE mailbox command to the HBA to setup the
12792 * work queue. This function waits for the mailbox command to finish
12793 * before continuing.
12794 *
12795 * On success this function will return a zero. If unable to allocate enough
12796 * memory this function will return -ENOMEM. If the queue create mailbox command
12797 * fails this function will return -ENXIO.
12798 **/
12799 uint32_t
12800 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12801 struct lpfc_queue *cq, uint32_t subtype)
12802 {
12803 struct lpfc_mbx_wq_create *wq_create;
12804 struct lpfc_dmabuf *dmabuf;
12805 LPFC_MBOXQ_t *mbox;
12806 int rc, length, status = 0;
12807 uint32_t shdr_status, shdr_add_status;
12808 union lpfc_sli4_cfg_shdr *shdr;
12809 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12810 struct dma_address *page;
12811 void __iomem *bar_memmap_p;
12812 uint32_t db_offset;
12813 uint16_t pci_barset;
12814
12815 /* sanity check on queue memory */
12816 if (!wq || !cq)
12817 return -ENODEV;
12818 if (!phba->sli4_hba.pc_sli4_params.supported)
12819 hw_page_size = SLI4_PAGE_SIZE;
12820
12821 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12822 if (!mbox)
12823 return -ENOMEM;
12824 length = (sizeof(struct lpfc_mbx_wq_create) -
12825 sizeof(struct lpfc_sli4_cfg_mhdr));
12826 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12827 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12828 length, LPFC_SLI4_MBX_EMBED);
12829 wq_create = &mbox->u.mqe.un.wq_create;
12830 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
12831 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12832 wq->page_count);
12833 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12834 cq->queue_id);
12835 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12836 phba->sli4_hba.pc_sli4_params.wqv);
12837
12838 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12839 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12840 wq->entry_count);
12841 switch (wq->entry_size) {
12842 default:
12843 case 64:
12844 bf_set(lpfc_mbx_wq_create_wqe_size,
12845 &wq_create->u.request_1,
12846 LPFC_WQ_WQE_SIZE_64);
12847 break;
12848 case 128:
12849 bf_set(lpfc_mbx_wq_create_wqe_size,
12850 &wq_create->u.request_1,
12851 LPFC_WQ_WQE_SIZE_128);
12852 break;
12853 }
12854 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12855 (PAGE_SIZE/SLI4_PAGE_SIZE));
12856 page = wq_create->u.request_1.page;
12857 } else {
12858 page = wq_create->u.request.page;
12859 }
12860 list_for_each_entry(dmabuf, &wq->page_list, list) {
12861 memset(dmabuf->virt, 0, hw_page_size);
12862 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12863 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
12864 }
12865
12866 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
12867 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
12868
12869 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12870 /* The IOCTL status is embedded in the mailbox subheader. */
12871 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12872 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12873 if (shdr_status || shdr_add_status || rc) {
12874 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12875 "2503 WQ_CREATE mailbox failed with "
12876 "status x%x add_status x%x, mbx status x%x\n",
12877 shdr_status, shdr_add_status, rc);
12878 status = -ENXIO;
12879 goto out;
12880 }
12881 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12882 if (wq->queue_id == 0xFFFF) {
12883 status = -ENXIO;
12884 goto out;
12885 }
12886 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
12887 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
12888 &wq_create->u.response);
12889 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
12890 (wq->db_format != LPFC_DB_RING_FORMAT)) {
12891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12892 "3265 WQ[%d] doorbell format not "
12893 "supported: x%x\n", wq->queue_id,
12894 wq->db_format);
12895 status = -EINVAL;
12896 goto out;
12897 }
12898 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
12899 &wq_create->u.response);
12900 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
12901 if (!bar_memmap_p) {
12902 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12903 "3263 WQ[%d] failed to memmap pci "
12904 "barset:x%x\n", wq->queue_id,
12905 pci_barset);
12906 status = -ENOMEM;
12907 goto out;
12908 }
12909 db_offset = wq_create->u.response.doorbell_offset;
12910 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
12911 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
12912 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12913 "3252 WQ[%d] doorbell offset not "
12914 "supported: x%x\n", wq->queue_id,
12915 db_offset);
12916 status = -EINVAL;
12917 goto out;
12918 }
12919 wq->db_regaddr = bar_memmap_p + db_offset;
12920 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12921 "3264 WQ[%d]: barset:x%x, offset:x%x, "
12922 "format:x%x\n", wq->queue_id, pci_barset,
12923 db_offset, wq->db_format);
12924 } else {
12925 wq->db_format = LPFC_DB_LIST_FORMAT;
12926 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
12927 }
12928 wq->type = LPFC_WQ;
12929 wq->assoc_qid = cq->queue_id;
12930 wq->subtype = subtype;
12931 wq->host_index = 0;
12932 wq->hba_index = 0;
12933 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
12934
12935 /* link the wq onto the parent cq child list */
12936 list_add_tail(&wq->list, &cq->child_list);
12937 out:
12938 mempool_free(mbox, phba->mbox_mem_pool);
12939 return status;
12940 }
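/*
 * Hypothetical usage sketch (queue names are illustrative, not taken from this
 * file): a work queue that has already been allocated, e.g. with
 * lpfc_sli4_queue_alloc(), would typically be created and bound to its parent
 * completion queue like this:
 *
 *	rc = lpfc_wq_create(phba, fcp_wq, fcp_cq, LPFC_FCP);
 *	if (rc)
 *		goto out_destroy_cq;
 */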
12941
12942 /**
12943 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12944 * @phba: HBA structure that indicates port to create a queue on.
12945 * @rq: The queue structure to use for the receive queue.
12946 * @qno: The associated HBQ number
12947 *
12949 * For SLI4 we need to adjust the RQ repost value based on
12950 * the number of buffers that are initially posted to the RQ.
12951 */
12952 void
12953 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12954 {
12955 uint32_t cnt;
12956
12957 /* sanity check on queue memory */
12958 if (!rq)
12959 return;
12960 cnt = lpfc_hbq_defs[qno]->entry_count;
12961
12962 /* Recalc repost for RQs based on buffers initially posted */
12963 cnt = (cnt >> 3);
12964 if (cnt < LPFC_QUEUE_MIN_REPOST)
12965 cnt = LPFC_QUEUE_MIN_REPOST;
12966
12967 rq->entry_repost = cnt;
12968 }
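/*
 * Illustrative arithmetic only: for an HBQ whose entry_count is 512, the
 * computed repost interval is 512 >> 3 = 64 and is used as-is; a hypothetical
 * entry_count of 32 would yield 4, which (assuming LPFC_QUEUE_MIN_REPOST is
 * larger than 4) would be clamped up to LPFC_QUEUE_MIN_REPOST.
 */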
12969
12970 /**
12971 * lpfc_rq_create - Create a Receive Queue on the HBA
12972 * @phba: HBA structure that indicates port to create a queue on.
12973 * @hrq: The queue structure to use to create the header receive queue.
12974 * @drq: The queue structure to use to create the data receive queue.
12975 * @cq: The completion queue to bind this work queue to.
12976 *
12977 * This function creates a receive buffer queue pair, as detailed in @hrq and
12978 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
12979 * to the HBA.
12980 *
12981 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
12982 * struct is used to get the entry count that is necessary to determine the
12983 * number of pages to use for this queue. The @cq is used to indicate which
12984 * completion queue the buffers received on these queues will be bound to.
12985 * This function will send the RQ_CREATE mailbox command to the HBA to set up the
12986 * receive queue pair. The mailbox command is issued in polling mode, so this
12987 * function waits for it to complete before returning.
12988 *
12989 * On success this function will return a zero. If unable to allocate enough
12990 * memory this function will return -ENOMEM. If the queue create mailbox command
12991 * fails this function will return -ENXIO.
12992 **/
12993 uint32_t
12994 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12995 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12996 {
12997 struct lpfc_mbx_rq_create *rq_create;
12998 struct lpfc_dmabuf *dmabuf;
12999 LPFC_MBOXQ_t *mbox;
13000 int rc, length, status = 0;
13001 uint32_t shdr_status, shdr_add_status;
13002 union lpfc_sli4_cfg_shdr *shdr;
13003 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13004 void __iomem *bar_memmap_p;
13005 uint32_t db_offset;
13006 uint16_t pci_barset;
13007
13008 /* sanity check on queue memory */
13009 if (!hrq || !drq || !cq)
13010 return -ENODEV;
13011 if (!phba->sli4_hba.pc_sli4_params.supported)
13012 hw_page_size = SLI4_PAGE_SIZE;
13013
13014 if (hrq->entry_count != drq->entry_count)
13015 return -EINVAL;
13016 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13017 if (!mbox)
13018 return -ENOMEM;
13019 length = (sizeof(struct lpfc_mbx_rq_create) -
13020 sizeof(struct lpfc_sli4_cfg_mhdr));
13021 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13022 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13023 length, LPFC_SLI4_MBX_EMBED);
13024 rq_create = &mbox->u.mqe.un.rq_create;
13025 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13026 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13027 phba->sli4_hba.pc_sli4_params.rqv);
13028 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13029 bf_set(lpfc_rq_context_rqe_count_1,
13030 &rq_create->u.request.context,
13031 hrq->entry_count);
13032 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
13033 bf_set(lpfc_rq_context_rqe_size,
13034 &rq_create->u.request.context,
13035 LPFC_RQE_SIZE_8);
13036 bf_set(lpfc_rq_context_page_size,
13037 &rq_create->u.request.context,
13038 (PAGE_SIZE/SLI4_PAGE_SIZE));
13039 } else {
13040 switch (hrq->entry_count) {
13041 default:
13042 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13043 "2535 Unsupported RQ count. (%d)\n",
13044 hrq->entry_count);
13045 if (hrq->entry_count < 512) {
13046 status = -EINVAL;
13047 goto out;
13048 }
13049 /* otherwise default to smallest count (drop through) */
13050 case 512:
13051 bf_set(lpfc_rq_context_rqe_count,
13052 &rq_create->u.request.context,
13053 LPFC_RQ_RING_SIZE_512);
13054 break;
13055 case 1024:
13056 bf_set(lpfc_rq_context_rqe_count,
13057 &rq_create->u.request.context,
13058 LPFC_RQ_RING_SIZE_1024);
13059 break;
13060 case 2048:
13061 bf_set(lpfc_rq_context_rqe_count,
13062 &rq_create->u.request.context,
13063 LPFC_RQ_RING_SIZE_2048);
13064 break;
13065 case 4096:
13066 bf_set(lpfc_rq_context_rqe_count,
13067 &rq_create->u.request.context,
13068 LPFC_RQ_RING_SIZE_4096);
13069 break;
13070 }
13071 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13072 LPFC_HDR_BUF_SIZE);
13073 }
13074 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13075 cq->queue_id);
13076 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13077 hrq->page_count);
13078 list_for_each_entry(dmabuf, &hrq->page_list, list) {
13079 memset(dmabuf->virt, 0, hw_page_size);
13080 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13081 putPaddrLow(dmabuf->phys);
13082 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13083 putPaddrHigh(dmabuf->phys);
13084 }
13085 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13086 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13087
13088 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13089 /* The IOCTL status is embedded in the mailbox subheader. */
13090 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13091 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13092 if (shdr_status || shdr_add_status || rc) {
13093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13094 "2504 RQ_CREATE mailbox failed with "
13095 "status x%x add_status x%x, mbx status x%x\n",
13096 shdr_status, shdr_add_status, rc);
13097 status = -ENXIO;
13098 goto out;
13099 }
13100 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13101 if (hrq->queue_id == 0xFFFF) {
13102 status = -ENXIO;
13103 goto out;
13104 }
13105
13106 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13107 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
13108 &rq_create->u.response);
13109 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
13110 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
13111 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13112 "3262 RQ [%d] doorbell format not "
13113 "supported: x%x\n", hrq->queue_id,
13114 hrq->db_format);
13115 status = -EINVAL;
13116 goto out;
13117 }
13118
13119 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
13120 &rq_create->u.response);
13121 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13122 if (!bar_memmap_p) {
13123 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13124 "3269 RQ[%d] failed to memmap pci "
13125 "barset:x%x\n", hrq->queue_id,
13126 pci_barset);
13127 status = -ENOMEM;
13128 goto out;
13129 }
13130
13131 db_offset = rq_create->u.response.doorbell_offset;
13132 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
13133 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
13134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13135 "3270 RQ[%d] doorbell offset not "
13136 "supported: x%x\n", hrq->queue_id,
13137 db_offset);
13138 status = -EINVAL;
13139 goto out;
13140 }
13141 hrq->db_regaddr = bar_memmap_p + db_offset;
13142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13143 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
13144 "format:x%x\n", hrq->queue_id, pci_barset,
13145 db_offset, hrq->db_format);
13146 } else {
13147 hrq->db_format = LPFC_DB_RING_FORMAT;
13148 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
13149 }
13150 hrq->type = LPFC_HRQ;
13151 hrq->assoc_qid = cq->queue_id;
13152 hrq->subtype = subtype;
13153 hrq->host_index = 0;
13154 hrq->hba_index = 0;
13155
13156 /* now create the data queue */
13157 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13158 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13159 length, LPFC_SLI4_MBX_EMBED);
13160 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13161 phba->sli4_hba.pc_sli4_params.rqv);
13162 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13163 bf_set(lpfc_rq_context_rqe_count_1,
13164 &rq_create->u.request.context, hrq->entry_count);
13165 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
13166 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
13167 LPFC_RQE_SIZE_8);
13168 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
13169 (PAGE_SIZE/SLI4_PAGE_SIZE));
13170 } else {
13171 switch (drq->entry_count) {
13172 default:
13173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13174 "2536 Unsupported RQ count. (%d)\n",
13175 drq->entry_count);
13176 if (drq->entry_count < 512) {
13177 status = -EINVAL;
13178 goto out;
13179 }
13180 /* otherwise default to smallest count (drop through) */
13181 case 512:
13182 bf_set(lpfc_rq_context_rqe_count,
13183 &rq_create->u.request.context,
13184 LPFC_RQ_RING_SIZE_512);
13185 break;
13186 case 1024:
13187 bf_set(lpfc_rq_context_rqe_count,
13188 &rq_create->u.request.context,
13189 LPFC_RQ_RING_SIZE_1024);
13190 break;
13191 case 2048:
13192 bf_set(lpfc_rq_context_rqe_count,
13193 &rq_create->u.request.context,
13194 LPFC_RQ_RING_SIZE_2048);
13195 break;
13196 case 4096:
13197 bf_set(lpfc_rq_context_rqe_count,
13198 &rq_create->u.request.context,
13199 LPFC_RQ_RING_SIZE_4096);
13200 break;
13201 }
13202 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13203 LPFC_DATA_BUF_SIZE);
13204 }
13205 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13206 cq->queue_id);
13207 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13208 drq->page_count);
13209 list_for_each_entry(dmabuf, &drq->page_list, list) {
13210 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13211 putPaddrLow(dmabuf->phys);
13212 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13213 putPaddrHigh(dmabuf->phys);
13214 }
13215 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13216 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13217 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13218 /* The IOCTL status is embedded in the mailbox subheader. */
13219 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13222 if (shdr_status || shdr_add_status || rc) {
13223 status = -ENXIO;
13224 goto out;
13225 }
13226 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13227 if (drq->queue_id == 0xFFFF) {
13228 status = -ENXIO;
13229 goto out;
13230 }
13231 drq->type = LPFC_DRQ;
13232 drq->assoc_qid = cq->queue_id;
13233 drq->subtype = subtype;
13234 drq->host_index = 0;
13235 drq->hba_index = 0;
13236
13237 /* link the header and data RQs onto the parent cq child list */
13238 list_add_tail(&hrq->list, &cq->child_list);
13239 list_add_tail(&drq->list, &cq->child_list);
13240
13241 out:
13242 mempool_free(mbox, phba->mbox_mem_pool);
13243 return status;
13244 }
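/*
 * Hypothetical usage sketch (queue names are illustrative): a caller that has
 * already allocated the header/data RQ pair and the parent CQ would create
 * them together, since the two queues must share the same entry_count:
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy_els_cq;
 */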
13245
13246 /**
13247 * lpfc_eq_destroy - Destroy an event Queue on the HBA
13248 * @eq: The queue structure associated with the queue to destroy.
13249 *
13250 * This function destroys a queue, as detailed in @eq, by sending a mailbox
13251 * command, specific to the type of queue, to the HBA.
13252 *
13253 * The @eq struct is used to get the queue ID of the queue to destroy.
13254 *
13255 * On success this function will return a zero. If the queue destroy mailbox
13256 * command fails this function will return -ENXIO.
13257 **/
13258 uint32_t
13259 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13260 {
13261 LPFC_MBOXQ_t *mbox;
13262 int rc, length, status = 0;
13263 uint32_t shdr_status, shdr_add_status;
13264 union lpfc_sli4_cfg_shdr *shdr;
13265
13266 /* sanity check on queue memory */
13267 if (!eq)
13268 return -ENODEV;
13269 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13270 if (!mbox)
13271 return -ENOMEM;
13272 length = (sizeof(struct lpfc_mbx_eq_destroy) -
13273 sizeof(struct lpfc_sli4_cfg_mhdr));
13274 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13275 LPFC_MBOX_OPCODE_EQ_DESTROY,
13276 length, LPFC_SLI4_MBX_EMBED);
13277 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13278 eq->queue_id);
13279 mbox->vport = eq->phba->pport;
13280 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13281
13282 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13283 /* The IOCTL status is embedded in the mailbox subheader. */
13284 shdr = (union lpfc_sli4_cfg_shdr *)
13285 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13286 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13287 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13288 if (shdr_status || shdr_add_status || rc) {
13289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13290 "2505 EQ_DESTROY mailbox failed with "
13291 "status x%x add_status x%x, mbx status x%x\n",
13292 shdr_status, shdr_add_status, rc);
13293 status = -ENXIO;
13294 }
13295
13296 /* Remove eq from any list */
13297 list_del_init(&eq->list);
13298 mempool_free(mbox, eq->phba->mbox_mem_pool);
13299 return status;
13300 }
13301
13302 /**
13303 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
13304 * @cq: The queue structure associated with the queue to destroy.
13305 *
13306 * This function destroys a queue, as detailed in @cq, by sending a mailbox
13307 * command, specific to the type of queue, to the HBA.
13308 *
13309 * The @cq struct is used to get the queue ID of the queue to destroy.
13310 *
13311 * On success this function will return a zero. If the queue destroy mailbox
13312 * command fails this function will return -ENXIO.
13313 **/
13314 uint32_t
13315 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13316 {
13317 LPFC_MBOXQ_t *mbox;
13318 int rc, length, status = 0;
13319 uint32_t shdr_status, shdr_add_status;
13320 union lpfc_sli4_cfg_shdr *shdr;
13321
13322 /* sanity check on queue memory */
13323 if (!cq)
13324 return -ENODEV;
13325 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
13326 if (!mbox)
13327 return -ENOMEM;
13328 length = (sizeof(struct lpfc_mbx_cq_destroy) -
13329 sizeof(struct lpfc_sli4_cfg_mhdr));
13330 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13331 LPFC_MBOX_OPCODE_CQ_DESTROY,
13332 length, LPFC_SLI4_MBX_EMBED);
13333 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
13334 cq->queue_id);
13335 mbox->vport = cq->phba->pport;
13336 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13337 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
13338 /* The IOCTL status is embedded in the mailbox subheader. */
13339 shdr = (union lpfc_sli4_cfg_shdr *)
13340 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
13341 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13342 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13343 if (shdr_status || shdr_add_status || rc) {
13344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13345 "2506 CQ_DESTROY mailbox failed with "
13346 "status x%x add_status x%x, mbx status x%x\n",
13347 shdr_status, shdr_add_status, rc);
13348 status = -ENXIO;
13349 }
13350 /* Remove cq from any list */
13351 list_del_init(&cq->list);
13352 mempool_free(mbox, cq->phba->mbox_mem_pool);
13353 return status;
13354 }
13355
13356 /**
13357 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
13358 * @mq: The queue structure associated with the queue to destroy.
13359 *
13360 * This function destroys a queue, as detailed in @mq, by sending a mailbox
13361 * command, specific to the type of queue, to the HBA.
13362 *
13363 * The @mq struct is used to get the queue ID of the queue to destroy.
13364 *
13365 * On success this function will return a zero. If the queue destroy mailbox
13366 * command fails this function will return -ENXIO.
13367 **/
13368 uint32_t
13369 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
13370 {
13371 LPFC_MBOXQ_t *mbox;
13372 int rc, length, status = 0;
13373 uint32_t shdr_status, shdr_add_status;
13374 union lpfc_sli4_cfg_shdr *shdr;
13375
13376 /* sanity check on queue memory */
13377 if (!mq)
13378 return -ENODEV;
13379 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
13380 if (!mbox)
13381 return -ENOMEM;
13382 length = (sizeof(struct lpfc_mbx_mq_destroy) -
13383 sizeof(struct lpfc_sli4_cfg_mhdr));
13384 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13385 LPFC_MBOX_OPCODE_MQ_DESTROY,
13386 length, LPFC_SLI4_MBX_EMBED);
13387 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
13388 mq->queue_id);
13389 mbox->vport = mq->phba->pport;
13390 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13391 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
13392 /* The IOCTL status is embedded in the mailbox subheader. */
13393 shdr = (union lpfc_sli4_cfg_shdr *)
13394 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
13395 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13396 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13397 if (shdr_status || shdr_add_status || rc) {
13398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13399 "2507 MQ_DESTROY mailbox failed with "
13400 "status x%x add_status x%x, mbx status x%x\n",
13401 shdr_status, shdr_add_status, rc);
13402 status = -ENXIO;
13403 }
13404 /* Remove mq from any list */
13405 list_del_init(&mq->list);
13406 mempool_free(mbox, mq->phba->mbox_mem_pool);
13407 return status;
13408 }
13409
13410 /**
13411 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
13412 * @wq: The queue structure associated with the queue to destroy.
13413 *
13414 * This function destroys a queue, as detailed in @wq, by sending a mailbox
13415 * command, specific to the type of queue, to the HBA.
13416 *
13417 * The @wq struct is used to get the queue ID of the queue to destroy.
13418 *
13419 * On success this function will return a zero. If the queue destroy mailbox
13420 * command fails this function will return -ENXIO.
13421 **/
13422 uint32_t
13423 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
13424 {
13425 LPFC_MBOXQ_t *mbox;
13426 int rc, length, status = 0;
13427 uint32_t shdr_status, shdr_add_status;
13428 union lpfc_sli4_cfg_shdr *shdr;
13429
13430 /* sanity check on queue memory */
13431 if (!wq)
13432 return -ENODEV;
13433 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
13434 if (!mbox)
13435 return -ENOMEM;
13436 length = (sizeof(struct lpfc_mbx_wq_destroy) -
13437 sizeof(struct lpfc_sli4_cfg_mhdr));
13438 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13439 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
13440 length, LPFC_SLI4_MBX_EMBED);
13441 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
13442 wq->queue_id);
13443 mbox->vport = wq->phba->pport;
13444 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13445 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
13446 shdr = (union lpfc_sli4_cfg_shdr *)
13447 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
13448 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13449 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13450 if (shdr_status || shdr_add_status || rc) {
13451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13452 "2508 WQ_DESTROY mailbox failed with "
13453 "status x%x add_status x%x, mbx status x%x\n",
13454 shdr_status, shdr_add_status, rc);
13455 status = -ENXIO;
13456 }
13457 /* Remove wq from any list */
13458 list_del_init(&wq->list);
13459 mempool_free(mbox, wq->phba->mbox_mem_pool);
13460 return status;
13461 }
13462
13463 /**
13464 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
13465 * @hrq: The header receive queue structure of the queue pair to destroy.
13466 * @drq: The data receive queue structure of the queue pair to destroy.
13467 *
13468 * This function destroys the receive queue pair, as detailed in @hrq and @drq,
13469 * by sending a mailbox command, specific to the type of queue, to the HBA.
13470 * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
13471 *
13472 * On success this function will return a zero. If the queue destroy mailbox
13473 * command fails this function will return -ENXIO.
13474 **/
13475 uint32_t
13476 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13477 struct lpfc_queue *drq)
13478 {
13479 LPFC_MBOXQ_t *mbox;
13480 int rc, length, status = 0;
13481 uint32_t shdr_status, shdr_add_status;
13482 union lpfc_sli4_cfg_shdr *shdr;
13483
13484 /* sanity check on queue memory */
13485 if (!hrq || !drq)
13486 return -ENODEV;
13487 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
13488 if (!mbox)
13489 return -ENOMEM;
13490 length = (sizeof(struct lpfc_mbx_rq_destroy) -
13491 sizeof(struct lpfc_sli4_cfg_mhdr));
13492 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13493 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
13494 length, LPFC_SLI4_MBX_EMBED);
13495 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13496 hrq->queue_id);
13497 mbox->vport = hrq->phba->pport;
13498 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13499 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
13500 /* The IOCTL status is embedded in the mailbox subheader. */
13501 shdr = (union lpfc_sli4_cfg_shdr *)
13502 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13503 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13504 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13505 if (shdr_status || shdr_add_status || rc) {
13506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13507 "2509 RQ_DESTROY mailbox failed with "
13508 "status x%x add_status x%x, mbx status x%x\n",
13509 shdr_status, shdr_add_status, rc);
13510 if (rc != MBX_TIMEOUT)
13511 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13512 return -ENXIO;
13513 }
13514 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
13515 drq->queue_id);
13516 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
13517 shdr = (union lpfc_sli4_cfg_shdr *)
13518 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
13519 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13520 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13521 if (shdr_status || shdr_add_status || rc) {
13522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13523 "2510 RQ_DESTROY mailbox failed with "
13524 "status x%x add_status x%x, mbx status x%x\n",
13525 shdr_status, shdr_add_status, rc);
13526 status = -ENXIO;
13527 }
13528 list_del_init(&hrq->list);
13529 list_del_init(&drq->list);
13530 mempool_free(mbox, hrq->phba->mbox_mem_pool);
13531 return status;
13532 }
13533
13534 /**
13535 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
13536 * @phba: pointer to lpfc hba data structure.
13537 * @pdma_phys_addr0: Physical address of the 1st SGL page.
13538 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
13539 * @xritag: the xritag that ties this io to the SGL pages.
13540 *
13541 * This routine will post the sgl pages for the IO that has the xritag
13542 * that is in the iocbq structure. The xritag is assigned during iocbq
13543 * creation and persists for as long as the driver is loaded.
13544 * If the caller has fewer than 256 scatter gather segments to map, then
13545 * pdma_phys_addr1 should be 0.
13546 * If the caller needs to map more than 256 scatter gather segments, then
13547 * pdma_phys_addr1 should be a valid physical address.
13548 * Physical addresses for SGLs must be 64 byte aligned.
13549 * If two SGL pages are mapped, the first one must have 256 entries and
13550 * the second can have between 1 and 256 entries.
13551 *
13552 * Return codes:
13553 * 0 - Success
13554 * -ENXIO, -ENOMEM - Failure
13555 **/
13556 int
13557 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
13558 dma_addr_t pdma_phys_addr0,
13559 dma_addr_t pdma_phys_addr1,
13560 uint16_t xritag)
13561 {
13562 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
13563 LPFC_MBOXQ_t *mbox;
13564 int rc;
13565 uint32_t shdr_status, shdr_add_status;
13566 uint32_t mbox_tmo;
13567 union lpfc_sli4_cfg_shdr *shdr;
13568
13569 if (xritag == NO_XRI) {
13570 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13571 "0364 Invalid param:\n");
13572 return -EINVAL;
13573 }
13574
13575 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13576 if (!mbox)
13577 return -ENOMEM;
13578
13579 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13580 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13581 sizeof(struct lpfc_mbx_post_sgl_pages) -
13582 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
13583
13584 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
13585 &mbox->u.mqe.un.post_sgl_pages;
13586 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
13587 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
13588
13589 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
13590 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
13591 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
13592 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
13593
13594 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
13595 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
13596 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
13597 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
13598 if (!phba->sli4_hba.intr_enable)
13599 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13600 else {
13601 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13602 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13603 }
13604 /* The IOCTL status is embedded in the mailbox subheader. */
13605 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
13606 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13607 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13608 if (rc != MBX_TIMEOUT)
13609 mempool_free(mbox, phba->mbox_mem_pool);
13610 if (shdr_status || shdr_add_status || rc) {
13611 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13612 "2511 POST_SGL mailbox failed with "
13613 "status x%x add_status x%x, mbx status x%x\n",
13614 shdr_status, shdr_add_status, rc);
13615 rc = -ENXIO;
13616 }
13617 	return rc;
13618 }
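/*
 * Hypothetical call sketch: posting a single SGL page for an xri when fewer
 * than 256 scatter gather entries are needed, so the second page address is
 * passed as 0. "sglq" is an illustrative struct lpfc_sglq pointer.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */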
13619
13620 /**
13621 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
13622 * @phba: pointer to lpfc hba data structure.
13623 *
13624 * This routine is invoked to allocate the next available logical xri from
13625 * the driver's xri bitmask, consistent with the SLI-4 interface spec. The
13626 * bitmask tracks which of the port's XRI resources are currently in use.
13627 *
13628 * Returns
13629 * An available xri in the range 0 <= xri < max_xri if successful
13630 * NO_XRI if no xris are available.
13632 **/
13633 uint16_t
13634 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
13635 {
13636 unsigned long xri;
13637
13638 /*
13639 * Fetch the next logical xri. Because this index is logical,
13640 * the driver starts at 0 each time.
13641 */
13642 spin_lock_irq(&phba->hbalock);
13643 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
13644 phba->sli4_hba.max_cfg_param.max_xri, 0);
13645 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
13646 spin_unlock_irq(&phba->hbalock);
13647 return NO_XRI;
13648 } else {
13649 set_bit(xri, phba->sli4_hba.xri_bmask);
13650 phba->sli4_hba.max_cfg_param.xri_used++;
13651 }
13652 spin_unlock_irq(&phba->hbalock);
13653 return xri;
13654 }
13655
13656 /**
13657 * __lpfc_sli4_free_xri - Release an xri for reuse.
13658 * @phba: pointer to lpfc hba data structure.
13659 *
13660 * This routine is invoked to release an xri to the pool of available xris
13661 * maintained by the driver. The caller is expected to hold the hbalock.
13662 **/
13663 void
13664 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13665 {
13666 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
13667 phba->sli4_hba.max_cfg_param.xri_used--;
13668 }
13669 }
13670
13671 /**
13672 * lpfc_sli4_free_xri - Release an xri for reuse.
13673 * @phba: pointer to lpfc hba data structure.
13674 *
13675 * This routine is invoked to release an xri to the pool of
13676 * available xris maintained by the driver.
13677 **/
13678 void
13679 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
13680 {
13681 spin_lock_irq(&phba->hbalock);
13682 __lpfc_sli4_free_xri(phba, xri);
13683 spin_unlock_irq(&phba->hbalock);
13684 }
13685
13686 /**
13687 * lpfc_sli4_next_xritag - Get an xritag for the io
13688 * @phba: Pointer to HBA context object.
13689 *
13690 * This function gets an xritag for the iocb. If there is no unused xritag
13691 * it will return NO_XRI (0xffff), which is not a valid xritag.
13692 * The function returns the allocated xritag if successful.
13694 * The caller is not required to hold any lock.
13695 **/
13696 uint16_t
13697 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
13698 {
13699 uint16_t xri_index;
13700
13701 xri_index = lpfc_sli4_alloc_xri(phba);
13702 if (xri_index == NO_XRI)
13703 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13704 "2004 Failed to allocate XRI.last XRITAG is %d"
13705 " Max XRI is %d, Used XRI is %d\n",
13706 xri_index,
13707 phba->sli4_hba.max_cfg_param.max_xri,
13708 phba->sli4_hba.max_cfg_param.xri_used);
13709 return xri_index;
13710 }
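/*
 * Illustrative pairing (not from this file): an xri obtained here is expected
 * to be handed back through lpfc_sli4_free_xri() (or __lpfc_sli4_free_xri()
 * with the hbalock already held) once the associated exchange completes.
 *
 *	xri = lpfc_sli4_next_xritag(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */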
13711
13712 /**
13713 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
13714 * @phba: pointer to lpfc hba data structure.
13715 * @post_sgl_list: pointer to els sgl entry list.
13716 * @count: number of els sgl entries on the list.
13717 *
13718 * This routine is invoked to post a block of driver's sgl pages to the
13719 * HBA using non-embedded mailbox command. No Lock is held. This routine
13720 * is only called when the driver is loading and after all IO has been
13721 * stopped.
13722 **/
13723 static int
13724 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
13725 struct list_head *post_sgl_list,
13726 int post_cnt)
13727 {
13728 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
13729 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13730 struct sgl_page_pairs *sgl_pg_pairs;
13731 void *viraddr;
13732 LPFC_MBOXQ_t *mbox;
13733 uint32_t reqlen, alloclen, pg_pairs;
13734 uint32_t mbox_tmo;
13735 uint16_t xritag_start = 0;
13736 int rc = 0;
13737 uint32_t shdr_status, shdr_add_status;
13738 union lpfc_sli4_cfg_shdr *shdr;
13739
13740 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
13741 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13742 if (reqlen > SLI4_PAGE_SIZE) {
13743 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13744 "2559 Block sgl registration required DMA "
13745 "size (%d) great than a page\n", reqlen);
13746 return -ENOMEM;
13747 }
13748 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13749 if (!mbox)
13750 return -ENOMEM;
13751
13752 /* Allocate DMA memory and set up the non-embedded mailbox command */
13753 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13754 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13755 LPFC_SLI4_MBX_NEMBED);
13756
13757 if (alloclen < reqlen) {
13758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13759 "0285 Allocated DMA memory size (%d) is "
13760 "less than the requested DMA memory "
13761 "size (%d)\n", alloclen, reqlen);
13762 lpfc_sli4_mbox_cmd_free(phba, mbox);
13763 return -ENOMEM;
13764 }
13765 /* Set up the SGL pages in the non-embedded DMA pages */
13766 viraddr = mbox->sge_array->addr[0];
13767 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13768 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13769
13770 pg_pairs = 0;
13771 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
13772 /* Set up the sge entry */
13773 sgl_pg_pairs->sgl_pg0_addr_lo =
13774 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13775 sgl_pg_pairs->sgl_pg0_addr_hi =
13776 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13777 sgl_pg_pairs->sgl_pg1_addr_lo =
13778 cpu_to_le32(putPaddrLow(0));
13779 sgl_pg_pairs->sgl_pg1_addr_hi =
13780 cpu_to_le32(putPaddrHigh(0));
13781
13782 /* Keep the first xritag on the list */
13783 if (pg_pairs == 0)
13784 xritag_start = sglq_entry->sli4_xritag;
13785 sgl_pg_pairs++;
13786 pg_pairs++;
13787 }
13788
13789 /* Complete initialization and perform endian conversion. */
13790 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13791 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
13792 sgl->word0 = cpu_to_le32(sgl->word0);
13793 if (!phba->sli4_hba.intr_enable)
13794 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13795 else {
13796 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13797 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13798 }
13799 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13800 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13801 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13802 if (rc != MBX_TIMEOUT)
13803 lpfc_sli4_mbox_cmd_free(phba, mbox);
13804 if (shdr_status || shdr_add_status || rc) {
13805 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13806 "2513 POST_SGL_BLOCK mailbox command failed "
13807 "status x%x add_status x%x mbx status x%x\n",
13808 shdr_status, shdr_add_status, rc);
13809 rc = -ENXIO;
13810 }
13811 return rc;
13812 }
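/*
 * Back-of-the-envelope sizing (assuming struct sgl_page_pairs is 16 bytes and
 * SLI4_PAGE_SIZE is 4096): after subtracting the embedded cfg header and the
 * trailing word, roughly 250 ELS sgl page pairs fit in one non-embedded
 * mailbox page before the "2559" size check above rejects the request.
 */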
13813
13814 /**
13815 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13816 * @phba: pointer to lpfc hba data structure.
13817 * @sblist: pointer to scsi buffer list.
13818 * @count: number of scsi buffers on the list.
13819 *
13820 * This routine is invoked to post a block of @count scsi sgl pages from a
13821 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
13822 * No Lock is held.
13823 *
13824 **/
13825 int
13826 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
13827 struct list_head *sblist,
13828 int count)
13829 {
13830 struct lpfc_scsi_buf *psb;
13831 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13832 struct sgl_page_pairs *sgl_pg_pairs;
13833 void *viraddr;
13834 LPFC_MBOXQ_t *mbox;
13835 uint32_t reqlen, alloclen, pg_pairs;
13836 uint32_t mbox_tmo;
13837 uint16_t xritag_start = 0;
13838 int rc = 0;
13839 uint32_t shdr_status, shdr_add_status;
13840 dma_addr_t pdma_phys_bpl1;
13841 union lpfc_sli4_cfg_shdr *shdr;
13842
13843 /* Calculate the requested length of the dma memory */
13844 reqlen = count * sizeof(struct sgl_page_pairs) +
13845 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13846 if (reqlen > SLI4_PAGE_SIZE) {
13847 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13848 "0217 Block sgl registration required DMA "
13849 "size (%d) great than a page\n", reqlen);
13850 return -ENOMEM;
13851 }
13852 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13853 if (!mbox) {
13854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13855 "0283 Failed to allocate mbox cmd memory\n");
13856 return -ENOMEM;
13857 }
13858
13859 /* Allocate DMA memory and set up the non-embedded mailbox command */
13860 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13861 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13862 LPFC_SLI4_MBX_NEMBED);
13863
13864 if (alloclen < reqlen) {
13865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13866 "2561 Allocated DMA memory size (%d) is "
13867 "less than the requested DMA memory "
13868 "size (%d)\n", alloclen, reqlen);
13869 lpfc_sli4_mbox_cmd_free(phba, mbox);
13870 return -ENOMEM;
13871 }
13872
13873 /* Get the first SGE entry from the non-embedded DMA memory */
13874 viraddr = mbox->sge_array->addr[0];
13875
13876 /* Set up the SGL pages in the non-embedded DMA pages */
13877 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13878 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13879
13880 pg_pairs = 0;
13881 list_for_each_entry(psb, sblist, list) {
13882 /* Set up the sge entry */
13883 sgl_pg_pairs->sgl_pg0_addr_lo =
13884 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13885 sgl_pg_pairs->sgl_pg0_addr_hi =
13886 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13887 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13888 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13889 else
13890 pdma_phys_bpl1 = 0;
13891 sgl_pg_pairs->sgl_pg1_addr_lo =
13892 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13893 sgl_pg_pairs->sgl_pg1_addr_hi =
13894 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13895 /* Keep the first xritag on the list */
13896 if (pg_pairs == 0)
13897 xritag_start = psb->cur_iocbq.sli4_xritag;
13898 sgl_pg_pairs++;
13899 pg_pairs++;
13900 }
13901 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13902 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13903 /* Perform endian conversion if necessary */
13904 sgl->word0 = cpu_to_le32(sgl->word0);
13905
13906 if (!phba->sli4_hba.intr_enable)
13907 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13908 else {
13909 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13910 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13911 }
13912 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13913 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13914 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13915 if (rc != MBX_TIMEOUT)
13916 lpfc_sli4_mbox_cmd_free(phba, mbox);
13917 if (shdr_status || shdr_add_status || rc) {
13918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13919 "2564 POST_SGL_BLOCK mailbox command failed "
13920 "status x%x add_status x%x mbx status x%x\n",
13921 shdr_status, shdr_add_status, rc);
13922 rc = -ENXIO;
13923 }
13924 return rc;
13925 }
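/*
 * Sizing note (assuming SGL_PAGE_SIZE is 4096): when phba->cfg_sg_dma_buf_size
 * exceeds one SGL page, the second page of a buffer's BPL starts SGL_PAGE_SIZE
 * bytes past dma_phys_bpl, which is why pdma_phys_bpl1 above is computed as
 * psb->dma_phys_bpl + SGL_PAGE_SIZE.
 */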
13926
13927 /**
13928 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13929 * @phba: pointer to lpfc_hba struct that the frame was received on
13930 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13931 *
13932 * This function checks the fields in the @fc_hdr to see if the FC frame is a
13933 * valid type of frame that the LPFC driver will handle. This function will
13934 * return a zero if the frame is a valid frame or a non zero value when the
13935 * frame does not pass the check.
13936 **/
13937 static int
13938 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13939 {
13940 /* make rctl_names static to save stack space */
13941 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
13942 char *type_names[] = FC_TYPE_NAMES_INIT;
13943 struct fc_vft_header *fc_vft_hdr;
13944 uint32_t *header = (uint32_t *) fc_hdr;
13945
13946 switch (fc_hdr->fh_r_ctl) {
13947 case FC_RCTL_DD_UNCAT: /* uncategorized information */
13948 case FC_RCTL_DD_SOL_DATA: /* solicited data */
13949 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
13950 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
13951 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
13952 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
13953 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
13954 case FC_RCTL_DD_CMD_STATUS: /* command status */
13955 case FC_RCTL_ELS_REQ: /* extended link services request */
13956 case FC_RCTL_ELS_REP: /* extended link services reply */
13957 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
13958 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
13959 case FC_RCTL_BA_NOP: /* basic link service NOP */
13960 case FC_RCTL_BA_ABTS: /* basic link service abort */
13961 case FC_RCTL_BA_RMC: /* remove connection */
13962 case FC_RCTL_BA_ACC: /* basic accept */
13963 case FC_RCTL_BA_RJT: /* basic reject */
13964 case FC_RCTL_BA_PRMT:
13965 case FC_RCTL_ACK_1: /* acknowledge_1 */
13966 case FC_RCTL_ACK_0: /* acknowledge_0 */
13967 case FC_RCTL_P_RJT: /* port reject */
13968 case FC_RCTL_F_RJT: /* fabric reject */
13969 case FC_RCTL_P_BSY: /* port busy */
13970 case FC_RCTL_F_BSY: /* fabric busy to data frame */
13971 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
13972 case FC_RCTL_LCR: /* link credit reset */
13973 case FC_RCTL_END: /* end */
13974 break;
13975 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
13976 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13977 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13978 return lpfc_fc_frame_check(phba, fc_hdr);
13979 default:
13980 goto drop;
13981 }
13982 switch (fc_hdr->fh_type) {
13983 case FC_TYPE_BLS:
13984 case FC_TYPE_ELS:
13985 case FC_TYPE_FCP:
13986 case FC_TYPE_CT:
13987 break;
13988 case FC_TYPE_IP:
13989 case FC_TYPE_ILS:
13990 default:
13991 goto drop;
13992 }
13993
13994 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
13995 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
13996 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
13997 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
13998 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
13999 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
14000 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
14001 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14002 be32_to_cpu(header[6]));
14003 return 0;
14004 drop:
14005 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14006 "2539 Dropped frame rctl:%s type:%s\n",
14007 rctl_names[fc_hdr->fh_r_ctl],
14008 type_names[fc_hdr->fh_type]);
14009 return 1;
14010 }
14011
14012 /**
14013 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
14014 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14015 *
14016 * This function processes the FC header to retrieve the VFI from the VF
14017 * header, if one exists. This function will return the VFI if one exists
14018 * or 0 if no Virtual Fabric Tagging Header exists.
14019 **/
14020 static uint32_t
14021 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14022 {
14023 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14024
14025 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14026 return 0;
14027 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14028 }
14029
14030 /**
14031 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
14032 * @phba: Pointer to the HBA structure to search for the vport on
14033 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
14034 * @fcfi: The FC Fabric ID that the frame came from
14035 *
14036 * This function searches the @phba for a vport that matches the content of the
14037 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
14038 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
14039 * returns the matching vport pointer or NULL if unable to match frame to a
14040 * vport.
14041 **/
14042 static struct lpfc_vport *
14043 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14044 uint16_t fcfi)
14045 {
14046 struct lpfc_vport **vports;
14047 struct lpfc_vport *vport = NULL;
14048 int i;
14049 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14050 fc_hdr->fh_d_id[1] << 8 |
14051 fc_hdr->fh_d_id[2]);
14052
14053 if (did == Fabric_DID)
14054 return phba->pport;
14055 if ((phba->pport->fc_flag & FC_PT2PT) &&
14056 !(phba->link_state == LPFC_HBA_READY))
14057 return phba->pport;
14058
14059 vports = lpfc_create_vport_work_array(phba);
14060 if (vports != NULL)
14061 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14062 if (phba->fcf.fcfi == fcfi &&
14063 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14064 vports[i]->fc_myDID == did) {
14065 vport = vports[i];
14066 break;
14067 }
14068 }
14069 lpfc_destroy_vport_work_array(phba, vports);
14070 return vport;
14071 }
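/*
 * Worked example of the D_ID assembly above (illustrative values): for
 * fh_d_id[] = { 0xFF, 0xFF, 0xFE } the computed did is
 * (0xFF << 16) | (0xFF << 8) | 0xFE = 0xFFFFFE, i.e. Fabric_DID, so the frame
 * is handed to the physical port without scanning the vport work array.
 */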
14072
14073 /**
14074 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
14075 * @vport: The vport to work on.
14076 *
14077 * This function updates the receive sequence time stamp for this vport. The
14078 * receive sequence time stamp indicates the time that the last frame of the
14079 * sequence that has been idle for the longest amount of time was received.
14080 * The driver uses this time stamp to determine if any received sequences have
14081 * timed out.
14082 **/
14083 void
14084 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14085 {
14086 struct lpfc_dmabuf *h_buf;
14087 struct hbq_dmabuf *dmabuf = NULL;
14088
14089 /* get the oldest sequence on the rcv list */
14090 h_buf = list_get_first(&vport->rcv_buffer_list,
14091 struct lpfc_dmabuf, list);
14092 if (!h_buf)
14093 return;
14094 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14095 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14096 }
14097
14098 /**
14099 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
14100 * @vport: The vport that the received sequences were sent to.
14101 *
14102 * This function cleans up all outstanding received sequences. This is called
14103 * by the driver when a link event or user action invalidates all the received
14104 * sequences.
14105 **/
14106 void
14107 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
14108 {
14109 struct lpfc_dmabuf *h_buf, *hnext;
14110 struct lpfc_dmabuf *d_buf, *dnext;
14111 struct hbq_dmabuf *dmabuf = NULL;
14112
14113 /* start with the oldest sequence on the rcv list */
14114 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14115 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14116 list_del_init(&dmabuf->hbuf.list);
14117 list_for_each_entry_safe(d_buf, dnext,
14118 &dmabuf->dbuf.list, list) {
14119 list_del_init(&d_buf->list);
14120 lpfc_in_buf_free(vport->phba, d_buf);
14121 }
14122 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14123 }
14124 }
14125
14126 /**
14127 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
14128 * @vport: The vport that the received sequences were sent to.
14129 *
14130 * This function determines whether any received sequences have timed out by
14131 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
14132 * indicates that there is at least one timed out sequence this routine will
14133 * go through the received sequences one at a time from most inactive to most
14134 * active to determine which ones need to be cleaned up. Once it has determined
14135 * that a sequence needs to be cleaned up it will simply free up the resources
14136 * without sending an abort.
14137 **/
14138 void
14139 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
14140 {
14141 struct lpfc_dmabuf *h_buf, *hnext;
14142 struct lpfc_dmabuf *d_buf, *dnext;
14143 struct hbq_dmabuf *dmabuf = NULL;
14144 unsigned long timeout;
14145 int abort_count = 0;
14146
14147 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14148 vport->rcv_buffer_time_stamp);
14149 if (list_empty(&vport->rcv_buffer_list) ||
14150 time_before(jiffies, timeout))
14151 return;
14152 /* start with the oldest sequence on the rcv list */
14153 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
14154 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14155 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
14156 dmabuf->time_stamp);
14157 if (time_before(jiffies, timeout))
14158 break;
14159 abort_count++;
14160 list_del_init(&dmabuf->hbuf.list);
14161 list_for_each_entry_safe(d_buf, dnext,
14162 &dmabuf->dbuf.list, list) {
14163 list_del_init(&d_buf->list);
14164 lpfc_in_buf_free(vport->phba, d_buf);
14165 }
14166 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
14167 }
14168 if (abort_count)
14169 lpfc_update_rcv_time_stamp(vport);
14170 }
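/*
 * Illustrative timing only: with a fabric E_D_TOV of 2000 ms, a sequence whose
 * dmabuf->time_stamp is older than (jiffies - msecs_to_jiffies(2000)) is
 * treated as timed out and its frames are freed without sending an abort.
 */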
14171
14172 /**
14173 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
14174 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
14175 *
14176 * This function searches through the existing incomplete sequences that have
14177 * been sent to this @vport. If the frame matches one of the incomplete
14178 * sequences then the dbuf in the @dmabuf is added to the list of frames that
14179 * make up that sequence. If no sequence is found that matches this frame then
14180 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
14181 * This function returns a pointer to the first dmabuf in the sequence list that
14182 * the frame was linked to.
14183 **/
14184 static struct hbq_dmabuf *
14185 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14186 {
14187 struct fc_frame_header *new_hdr;
14188 struct fc_frame_header *temp_hdr;
14189 struct lpfc_dmabuf *d_buf;
14190 struct lpfc_dmabuf *h_buf;
14191 struct hbq_dmabuf *seq_dmabuf = NULL;
14192 struct hbq_dmabuf *temp_dmabuf = NULL;
14193
14194 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14195 dmabuf->time_stamp = jiffies;
14196 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14197 /* Use the hdr_buf to find the sequence that this frame belongs to */
14198 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14199 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14200 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14201 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14202 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14203 continue;
14204 /* found a pending sequence that matches this frame */
14205 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14206 break;
14207 }
14208 if (!seq_dmabuf) {
14209 /*
14210 * This indicates first frame received for this sequence.
14211 * Queue the buffer on the vport's rcv_buffer_list.
14212 */
14213 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14214 lpfc_update_rcv_time_stamp(vport);
14215 return dmabuf;
14216 }
14217 temp_hdr = seq_dmabuf->hbuf.virt;
14218 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
14219 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14220 list_del_init(&seq_dmabuf->hbuf.list);
14221 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
14222 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14223 lpfc_update_rcv_time_stamp(vport);
14224 return dmabuf;
14225 }
14226 /* move this sequence to the tail to indicate a young sequence */
14227 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
14228 seq_dmabuf->time_stamp = jiffies;
14229 lpfc_update_rcv_time_stamp(vport);
14230 if (list_empty(&seq_dmabuf->dbuf.list)) {
14231 temp_hdr = dmabuf->hbuf.virt;
14232 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
14233 return seq_dmabuf;
14234 }
14235 /* find the correct place in the sequence to insert this frame */
14236 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
14237 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14238 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
14239 /*
14240 * If the frame's sequence count is greater than the frame on
14241 * the list then insert the frame right after this frame
14242 */
14243 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
14244 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
14245 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
14246 return seq_dmabuf;
14247 }
14248 }
14249 return NULL;
14250 }
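/*
 * Ordering sketch (hypothetical sequence counts): if a pending sequence
 * already holds data frames with fh_seq_cnt 0 and 2 and a frame with
 * fh_seq_cnt 1 arrives, the reverse walk above skips the count-2 entry and
 * links the new frame right after the count-0 entry, keeping the dbuf list
 * sorted by sequence count.
 */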
14251
14252 /**
14253 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
14254 * @vport: pointer to a virtual port
14255 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14256 *
14257 * This function tries to abort the partially assembled sequence described
14258 * by the information in the basic abort @dmabuf. It checks to see whether such
14259 * a partially assembled sequence is held by the driver. If so, it shall free up all
14260 * the frames from the partially assembled sequence.
14261 *
14262 * Return
14263 * true -- if a matching partially assembled sequence is present and all
14264 * of its frames were freed with the sequence;
14265 * false -- if there is no matching partially assembled sequence present, so
14266 * nothing got aborted in the lower layer driver
14267 **/
14268 static bool
14269 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
14270 struct hbq_dmabuf *dmabuf)
14271 {
14272 struct fc_frame_header *new_hdr;
14273 struct fc_frame_header *temp_hdr;
14274 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
14275 struct hbq_dmabuf *seq_dmabuf = NULL;
14276
14277 /* Use the hdr_buf to find the sequence that matches this frame */
14278 INIT_LIST_HEAD(&dmabuf->dbuf.list);
14279 INIT_LIST_HEAD(&dmabuf->hbuf.list);
14280 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14281 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
14282 temp_hdr = (struct fc_frame_header *)h_buf->virt;
14283 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
14284 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
14285 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
14286 continue;
14287 /* found a pending sequence that matches this frame */
14288 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14289 break;
14290 }
14291
14292 /* Free up all the frames from the partially assembled sequence */
14293 if (seq_dmabuf) {
14294 list_for_each_entry_safe(d_buf, n_buf,
14295 &seq_dmabuf->dbuf.list, list) {
14296 list_del_init(&d_buf->list);
14297 lpfc_in_buf_free(vport->phba, d_buf);
14298 }
14299 return true;
14300 }
14301 return false;
14302 }
14303
14304 /**
14305 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
14306 * @vport: pointer to a virtual port
14307 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14308 *
14309 * This function tries to abort the assembled sequence at the upper level
14310 * protocol, described by the information in the basic abort @dmabuf. It
14311 * checks to see whether such a pending context exists at the upper level protocol.
14312 * If so, it shall clean up the pending context.
14313 *
14314 * Return
14315 * true -- if a matching pending context for the sequence was found and cleaned
14316 * up at the ulp;
14317 * false -- if there is no matching pending context for the sequence present
14318 * at the ulp.
14319 **/
14320 static bool
14321 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
14322 {
14323 struct lpfc_hba *phba = vport->phba;
14324 int handled;
14325
14326 /* Accepting abort at ulp with SLI4 only */
14327 if (phba->sli_rev < LPFC_SLI_REV4)
14328 return false;
14329
14330 /* Register all caring upper level protocols to attend abort */
14331 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
14332 if (handled)
14333 return true;
14334
14335 return false;
14336 }
14337
14338 /**
14339 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
14340 * @phba: Pointer to HBA context object.
14341 * @cmd_iocbq: pointer to the command iocbq structure.
14342 * @rsp_iocbq: pointer to the response iocbq structure.
14343 *
14344 * This function handles the sequence abort response iocb command complete
14345 * event. It properly releases the memory allocated to the sequence abort
14346 * accept iocb.
14347 **/
14348 static void
14349 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
14350 struct lpfc_iocbq *cmd_iocbq,
14351 struct lpfc_iocbq *rsp_iocbq)
14352 {
14353 struct lpfc_nodelist *ndlp;
14354
14355 if (cmd_iocbq) {
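	/* Drop the node reference taken when the BLS RSP iocb was built, then free the iocb */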
14356 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
14357 lpfc_nlp_put(ndlp);
14358 lpfc_nlp_not_used(ndlp);
14359 lpfc_sli_release_iocbq(phba, cmd_iocbq);
14360 }
14361
14362 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
14363 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
14364 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14365 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
14366 rsp_iocbq->iocb.ulpStatus,
14367 rsp_iocbq->iocb.un.ulpWord[4]);
14368 }
14369
14370 /**
14371 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
14372 * @phba: Pointer to HBA context object.
14373 * @xri: xri id in transaction.
14374 *
14375 * This function validates that the xri maps to the known range of XRIs allocated and
14376 * used by the driver.
14377 **/
14378 uint16_t
14379 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
14380 uint16_t xri)
14381 {
14382 int i;
14383
14384 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
14385 if (xri == phba->sli4_hba.xri_ids[i])
14386 return i;
14387 }
14388 return NO_XRI;
14389 }
14390
14391 /**
14392 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
14393 * @vport: Pointer to the vport on which the sequence abort was received.
14394 * @fc_hdr: pointer to a FC frame header.
 * @aborted: true if the sequence was aborted; when false a BA_RJT is sent instead of a BA_ACC.
14395 *
14396 * This function sends a basic response to a previous unsol sequence abort
14397 * event after aborting the sequence handling.
14398 **/
14399 static void
14400 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
14401 struct fc_frame_header *fc_hdr, bool aborted)
14402 {
14403 struct lpfc_hba *phba = vport->phba;
14404 struct lpfc_iocbq *ctiocb = NULL;
14405 struct lpfc_nodelist *ndlp;
14406 uint16_t oxid, rxid, xri, lxri;
14407 uint32_t sid, fctl;
14408 IOCB_t *icmd;
14409 int rc;
14410
14411 if (!lpfc_is_link_up(phba))
14412 return;
14413
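	/* Pull the S_ID and both exchange IDs out of the ABTS frame header */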
14414 sid = sli4_sid_from_fc_hdr(fc_hdr);
14415 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
14416 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
14417
14418 ndlp = lpfc_findnode_did(vport, sid);
14419 if (!ndlp) {
14420 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
14421 if (!ndlp) {
14422 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14423 "1268 Failed to allocate ndlp for "
14424 "oxid:x%x SID:x%x\n", oxid, sid);
14425 return;
14426 }
14427 lpfc_nlp_init(vport, ndlp, sid);
14428 /* Put ndlp onto pport node list */
14429 lpfc_enqueue_node(vport, ndlp);
14430 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
14431 /* re-setup ndlp without removing from node list */
14432 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
14433 if (!ndlp) {
14434 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
14435 "3275 Failed to active ndlp found "
14436 "for oxid:x%x SID:x%x\n", oxid, sid);
14437 return;
14438 }
14439 }
14440
14441 /* Allocate buffer for rsp iocb */
14442 ctiocb = lpfc_sli_get_iocbq(phba);
14443 if (!ctiocb)
14444 return;
14445
14446 /* Extract the F_CTL field from FC_HDR */
14447 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
14448
14449 icmd = &ctiocb->iocb;
14450 icmd->un.xseq64.bdl.bdeSize = 0;
14451 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
14452 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
14453 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
14454 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
14455
14456 /* Fill in the rest of iocb fields */
14457 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
14458 icmd->ulpBdeCount = 0;
14459 icmd->ulpLe = 1;
14460 icmd->ulpClass = CLASS3;
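	/* Reference the remote node: use its physical rpi and take a node ref for the completion */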
14461 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
14462 ctiocb->context1 = lpfc_nlp_get(ndlp);
14463
14464 ctiocb->iocb_cmpl = NULL;
14465 ctiocb->vport = phba->pport;
14466 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
14467 ctiocb->sli4_lxritag = NO_XRI;
14468 ctiocb->sli4_xritag = NO_XRI;
14469
14470 if (fctl & FC_FC_EX_CTX)
14471 /* Exchange responder sent the abort so we
14472 * own the oxid.
14473 */
14474 xri = oxid;
14475 else
14476 xri = rxid;
14477 lxri = lpfc_sli4_xri_inrange(phba, xri);
14478 if (lxri != NO_XRI)
14479 lpfc_set_rrq_active(phba, ndlp, lxri,
14480 (xri == oxid) ? rxid : oxid, 0);
14481 /* For BA_ABTS from exchange responder, if the logical xri with
14482 * the oxid maps to the FCP XRI range, the port no longer has
14483 * that exchange context, send a BLS_RJT. Override the IOCB for
14484 * a BA_RJT.
14485 */
14486 if ((fctl & FC_FC_EX_CTX) &&
14487 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
14488 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14489 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14490 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14491 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14492 }
14493
14494 /* If BA_ABTS failed to abort a partially assembled receive sequence,
14495 * the driver no longer has that exchange, send a BLS_RJT. Override
14496 * the IOCB for a BA_RJT.
14497 */
14498 if (aborted == false) {
14499 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
14500 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
14501 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
14502 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
14503 }
14504
14505 if (fctl & FC_FC_EX_CTX) {
14506 /* ABTS sent by responder to CT exchange, construction
14507 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
14508 * field and RX_ID from ABTS for RX_ID field.
14509 */
14510 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14511 } else {
14512 /* ABTS sent by initiator to CT exchange, construction
14513 * of BA_ACC will need to allocate a new XRI as for the
14514 * XRI_TAG field.
14515 */
14516 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
14517 }
14518 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14519 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14520
14521 /* Xmit CT abts response on exchange <xid> */
14522 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
14523 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
14524 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
14525
14526 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
14527 if (rc == IOCB_ERROR) {
14528 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
14529 "2925 Failed to issue CT ABTS RSP x%x on "
14530 "xri x%x, Data x%x\n",
14531 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
14532 phba->link_state);
14533 lpfc_nlp_put(ndlp);
14534 ctiocb->context1 = NULL;
14535 lpfc_sli_release_iocbq(phba, ctiocb);
14536 }
14537 }
14538
14539 /**
14540 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
14541 * @vport: Pointer to the vport on which this sequence was received
14542 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14543 *
14544 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
14545 * receive sequence is only partially assembled by the driver, it shall abort
14546 * the partially assembled frames for the sequence. Otherwise, if the
14547 * unsolicited receive sequence has been completely assembled and passed to
14548 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to indicate
14549 * that the unsolicited sequence has been aborted. After that, it will issue a basic
14550 * accept to accept the abort.
14551 **/
14552 void
14553 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
14554 struct hbq_dmabuf *dmabuf)
14555 {
14556 struct lpfc_hba *phba = vport->phba;
14557 struct fc_frame_header fc_hdr;
14558 uint32_t fctl;
14559 bool aborted;
14560
14561 /* Make a copy of fc_hdr before the dmabuf being released */
14562 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
14563 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
14564
14565 if (fctl & FC_FC_EX_CTX) {
14566 /* ABTS by responder to exchange, no cleanup needed */
14567 aborted = true;
14568 } else {
14569 /* ABTS by initiator to exchange, need to do cleanup */
14570 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
14571 if (aborted == false)
14572 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
14573 }
14574 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14575
14576 /* Respond with BA_ACC or BA_RJT accordingly */
14577 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
14578 }
14579
14580 /**
14581 * lpfc_seq_complete - Indicates if a sequence is complete
14582 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14583 *
14584 * This function checks the sequence, starting with the frame described by
14585 * @dmabuf, to see if all the frames associated with this sequence are present.
14586 * The frames associated with this sequence are linked to the @dmabuf using the
14587 * dbuf list. This function looks for three major things. 1) That the first frame
14588 * has a sequence count of zero. 2) That there is a frame with the last frame of
14589 * sequence bit set. 3) That there are no holes in the sequence count. The function will
14590 * return 1 when the sequence is complete, otherwise it will return 0.
14591 **/
14592 static int
14593 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14594 {
14595 struct fc_frame_header *hdr;
14596 struct lpfc_dmabuf *d_buf;
14597 struct hbq_dmabuf *seq_dmabuf;
14598 uint32_t fctl;
14599 int seq_count = 0;
14600
14601 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14602 /* make sure first frame of sequence has a sequence count of zero */
14603 if (hdr->fh_seq_cnt != seq_count)
14604 return 0;
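	/* Reassemble the 24-bit F_CTL field from its three bytes in the frame header */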
14605 fctl = (hdr->fh_f_ctl[0] << 16 |
14606 hdr->fh_f_ctl[1] << 8 |
14607 hdr->fh_f_ctl[2]);
14608 /* If last frame of sequence we can return success. */
14609 if (fctl & FC_FC_END_SEQ)
14610 return 1;
14611 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14612 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14613 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14614 /* If there is a hole in the sequence count then fail. */
14615 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
14616 return 0;
14617 fctl = (hdr->fh_f_ctl[0] << 16 |
14618 hdr->fh_f_ctl[1] << 8 |
14619 hdr->fh_f_ctl[2]);
14620 /* If last frame of sequence we can return success. */
14621 if (fctl & FC_FC_END_SEQ)
14622 return 1;
14623 }
14624 return 0;
14625 }
14626
14627 /**
14628 * lpfc_prep_seq - Prep sequence for ULP processing
14629 * @vport: Pointer to the vport on which this sequence was received
14630 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14631 *
14632 * This function takes a sequence, described by a list of frames, and creates
14633 * a list of iocbq structures to describe the sequence. This iocbq list will be
14634 * used to issue to the generic unsolicited sequence handler. This routine
14635 * returns a pointer to the first iocbq in the list. If the function is unable
14636 * to allocate an iocbq then it throws out the received frames that were not
14637 * able to be described and returns a pointer to the first iocbq. If unable to
14638 * allocate any iocbqs (including the first) this function will return NULL.
14639 **/
14640 static struct lpfc_iocbq *
14641 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14642 {
14643 struct hbq_dmabuf *hbq_buf;
14644 struct lpfc_dmabuf *d_buf, *n_buf;
14645 struct lpfc_iocbq *first_iocbq, *iocbq;
14646 struct fc_frame_header *fc_hdr;
14647 uint32_t sid;
14648 uint32_t len, tot_len;
14649 struct ulp_bde64 *pbde;
14650
14651 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14652 /* remove from receive buffer list */
14653 list_del_init(&seq_dmabuf->hbuf.list);
14654 lpfc_update_rcv_time_stamp(vport);
14655 /* get the Remote Port's SID */
14656 sid = sli4_sid_from_fc_hdr(fc_hdr);
14657 tot_len = 0;
14658 /* Get an iocbq struct to fill in. */
14659 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14660 if (first_iocbq) {
14661 /* Initialize the first IOCB. */
14662 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
14663 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14664
14665 /* Check FC Header to see what TYPE of frame we are rcv'ing */
14666 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
14667 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
14668 first_iocbq->iocb.un.rcvels.parmRo =
14669 sli4_did_from_fc_hdr(fc_hdr);
14670 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
14671 } else
14672 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
14673 first_iocbq->iocb.ulpContext = NO_XRI;
14674 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14675 be16_to_cpu(fc_hdr->fh_ox_id);
14676 /* iocbq is prepped for internal consumption. Physical vpi. */
14677 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14678 vport->phba->vpi_ids[vport->vpi];
14679 /* put the first buffer into the first IOCBq */
14680 first_iocbq->context2 = &seq_dmabuf->dbuf;
14681 first_iocbq->context3 = NULL;
14682 first_iocbq->iocb.ulpBdeCount = 1;
14683 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14684 LPFC_DATA_BUF_SIZE;
14685 first_iocbq->iocb.un.rcvels.remoteID = sid;
14686 tot_len = bf_get(lpfc_rcqe_length,
14687 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
14688 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14689 }
14690 iocbq = first_iocbq;
14691 /*
14692 * Each IOCBq can have two Buffers assigned, so go through the list
14693 * of buffers for this sequence and save two buffers in each IOCBq
14694 */
14695 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14696 if (!iocbq) {
14697 lpfc_in_buf_free(vport->phba, d_buf);
14698 continue;
14699 }
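		/* If the current iocb still has room, attach this buffer as its second BDE */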
14700 if (!iocbq->context3) {
14701 iocbq->context3 = d_buf;
14702 iocbq->iocb.ulpBdeCount++;
14703 pbde = (struct ulp_bde64 *)
14704 &iocbq->iocb.unsli3.sli3Words[4];
14705 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
14706
14707 /* We need to get the size out of the right CQE */
14708 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14709 len = bf_get(lpfc_rcqe_length,
14710 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14711 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14712 tot_len += len;
14713 } else {
14714 iocbq = lpfc_sli_get_iocbq(vport->phba);
14715 if (!iocbq) {
14716 if (first_iocbq) {
14717 first_iocbq->iocb.ulpStatus =
14718 IOSTAT_FCP_RSP_ERROR;
14719 first_iocbq->iocb.un.ulpWord[4] =
14720 IOERR_NO_RESOURCES;
14721 }
14722 lpfc_in_buf_free(vport->phba, d_buf);
14723 continue;
14724 }
14725 iocbq->context2 = d_buf;
14726 iocbq->context3 = NULL;
14727 iocbq->iocb.ulpBdeCount = 1;
14728 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14729 LPFC_DATA_BUF_SIZE;
14730
14731 /* We need to get the size out of the right CQE */
14732 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14733 len = bf_get(lpfc_rcqe_length,
14734 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14735 tot_len += len;
14736 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14737
14738 iocbq->iocb.un.rcvels.remoteID = sid;
14739 list_add_tail(&iocbq->list, &first_iocbq->list);
14740 }
14741 }
14742 return first_iocbq;
14743 }
14744
14745 static void
14746 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14747 struct hbq_dmabuf *seq_dmabuf)
14748 {
14749 struct fc_frame_header *fc_hdr;
14750 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14751 struct lpfc_hba *phba = vport->phba;
14752
14753 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14754 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14755 if (!iocbq) {
14756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14757 "2707 Ring %d handler: Failed to allocate "
14758 "iocb Rctl x%x Type x%x received\n",
14759 LPFC_ELS_RING,
14760 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14761 return;
14762 }
14763 if (!lpfc_complete_unsol_iocb(phba,
14764 &phba->sli.ring[LPFC_ELS_RING],
14765 iocbq, fc_hdr->fh_r_ctl,
14766 fc_hdr->fh_type))
14767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14768 "2540 Ring %d handler: unexpected Rctl "
14769 "x%x Type x%x received\n",
14770 LPFC_ELS_RING,
14771 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14772
14773 /* Free iocb created in lpfc_prep_seq */
14774 list_for_each_entry_safe(curr_iocb, next_iocb,
14775 &iocbq->list, list) {
14776 list_del_init(&curr_iocb->list);
14777 lpfc_sli_release_iocbq(phba, curr_iocb);
14778 }
14779 lpfc_sli_release_iocbq(phba, iocbq);
14780 }
14781
14782 /**
14783 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14784 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the received FC frame.
14785 *
14786 * This function is called with no lock held. This function processes all
14787 * the received buffers and gives it to upper layers when a received buffer
14788 * indicates that it is the final frame in the sequence. The interrupt
14789 * service routine processes received buffers at interrupt contexts and adds
14790 * received dma buffers to the rb_pend_list queue and signals the worker thread.
14791 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14792 * appropriate receive function when the final frame in a sequence is received.
14793 **/
14794 void
14795 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14796 struct hbq_dmabuf *dmabuf)
14797 {
14798 struct hbq_dmabuf *seq_dmabuf;
14799 struct fc_frame_header *fc_hdr;
14800 struct lpfc_vport *vport;
14801 uint32_t fcfi;
14802 uint32_t did;
14803
14804 /* Process each received buffer */
14805 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14806 /* check to see if this is a valid type of frame */
14807 if (lpfc_fc_frame_check(phba, fc_hdr)) {
14808 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14809 return;
14810 }
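	/* The fcf_id field location differs between RECEIVE and RECEIVE_V1 CQEs */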
14811 if ((bf_get(lpfc_cqe_code,
14812 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14813 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14814 &dmabuf->cq_event.cqe.rcqe_cmpl);
14815 else
14816 fcfi = bf_get(lpfc_rcqe_fcf_id,
14817 &dmabuf->cq_event.cqe.rcqe_cmpl);
14818
14819 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
14820 if (!vport) {
14821 /* throw out the frame */
14822 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14823 return;
14824 }
14825
14826 /* d_id this frame is directed to */
14827 did = sli4_did_from_fc_hdr(fc_hdr);
14828
14829 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
14830 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
14831 (did != Fabric_DID)) {
14832 /*
14833 * Throw out the frame if we are not pt2pt.
14834 * The pt2pt protocol allows for discovery frames
14835 * to be received without a registered VPI.
14836 */
14837 if (!(vport->fc_flag & FC_PT2PT) ||
14838 (phba->link_state == LPFC_HBA_READY)) {
14839 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14840 return;
14841 }
14842 }
14843
14844 /* Handle the basic abort sequence (BA_ABTS) event */
14845 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14846 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14847 return;
14848 }
14849
14850 /* Link this frame */
14851 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14852 if (!seq_dmabuf) {
14853 /* unable to add frame to vport - throw it out */
14854 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14855 return;
14856 }
14857 /* If not last frame in sequence continue processing frames. */
14858 if (!lpfc_seq_complete(seq_dmabuf))
14859 return;
14860
14861 /* Send the complete sequence to the upper layer protocol */
14862 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
14863 }
14864
14865 /**
14866 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14867 * @phba: pointer to lpfc hba data structure.
14868 *
14869 * This routine is invoked to post rpi header templates to the
14870 * HBA consistent with the SLI-4 interface spec. This routine
14871 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
14872 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
14873 *
14874 * This routine does not require any locks. Its usage is expected
14875 * to be driver load or reset recovery when the driver is
14876 * sequential.
14877 *
14878 * Return codes
14879 * 0 - successful
14880 * -EIO - The mailbox failed to complete successfully.
14881 * When this error occurs, the driver is not guaranteed
14882 * to have any rpi regions posted to the device and
14883 * must either attempt to repost the regions or take a
14884 * fatal error.
14885 **/
14886 int
14887 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14888 {
14889 struct lpfc_rpi_hdr *rpi_page;
14890 uint32_t rc = 0;
14891 uint16_t lrpi = 0;
14892
14893 /* SLI4 ports that support extents do not require RPI headers. */
14894 if (!phba->sli4_hba.rpi_hdrs_in_use)
14895 goto exit;
14896 if (phba->sli4_hba.extents_in_use)
14897 return -EIO;
14898
14899 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
14900 /*
14901 * Assign the rpi headers a physical rpi only if the driver
14902 * has not initialized those resources. A port reset only
14903 * needs the headers posted.
14904 */
14905 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14906 LPFC_RPI_RSRC_RDY)
14907 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14908
14909 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14910 if (rc != MBX_SUCCESS) {
14911 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14912 "2008 Error %d posting all rpi "
14913 "headers\n", rc);
14914 rc = -EIO;
14915 break;
14916 }
14917 }
14918
14919 exit:
14920 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14921 LPFC_RPI_RSRC_RDY);
14922 return rc;
14923 }
14924
14925 /**
14926 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14927 * @phba: pointer to lpfc hba data structure.
14928 * @rpi_page: pointer to the rpi memory region.
14929 *
14930 * This routine is invoked to post a single rpi header to the
14931 * HBA consistent with the SLI-4 interface spec. This memory region
14932 * maps up to 64 rpi context regions.
14933 *
14934 * Return codes
14935 * 0 - successful
14936 * -ENOMEM - No available memory
14937 * -EIO - The mailbox failed to complete successfully.
14938 **/
14939 int
14940 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14941 {
14942 LPFC_MBOXQ_t *mboxq;
14943 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14944 uint32_t rc = 0;
14945 uint32_t shdr_status, shdr_add_status;
14946 union lpfc_sli4_cfg_shdr *shdr;
14947
14948 /* SLI4 ports that support extents do not require RPI headers. */
14949 if (!phba->sli4_hba.rpi_hdrs_in_use)
14950 return rc;
14951 if (phba->sli4_hba.extents_in_use)
14952 return -EIO;
14953
14954 /* The port is notified of the header region via a mailbox command. */
14955 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14956 if (!mboxq) {
14957 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14958 "2001 Unable to allocate memory for issuing "
14959 "SLI_CONFIG_SPECIAL mailbox command\n");
14960 return -ENOMEM;
14961 }
14962
14963 /* Post all rpi memory regions to the port. */
14964 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
14965 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14966 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14967 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
14968 sizeof(struct lpfc_sli4_cfg_mhdr),
14969 LPFC_SLI4_MBX_EMBED);
14970
14971
14972 /* Post the physical rpi to the port for this rpi header. */
14973 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14974 rpi_page->start_rpi);
14975 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14976 hdr_tmpl, rpi_page->page_count);
14977
14978 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14979 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
14980 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
14981 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14982 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14983 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14984 if (rc != MBX_TIMEOUT)
14985 mempool_free(mboxq, phba->mbox_mem_pool);
14986 if (shdr_status || shdr_add_status || rc) {
14987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14988 "2514 POST_RPI_HDR mailbox failed with "
14989 "status x%x add_status x%x, mbx status x%x\n",
14990 shdr_status, shdr_add_status, rc);
14991 rc = -ENXIO;
14992 }
14993 return rc;
14994 }
14995
14996 /**
14997 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14998 * @phba: pointer to lpfc hba data structure.
14999 *
15000 * This routine is invoked to allocate the next available rpi from the
15001 * driver's rpi bitmask. If the allocation leaves the driver low on rpi
15002 * resources and the port uses rpi headers, an additional rpi header
15003 * region is created and posted to the port.
15004 *
15005 * Returns
15006 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
15007 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
15008 **/
15009 int
15010 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15011 {
15012 unsigned long rpi;
15013 uint16_t max_rpi, rpi_limit;
15014 uint16_t rpi_remaining, lrpi = 0;
15015 struct lpfc_rpi_hdr *rpi_hdr;
15016
15017 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15018 rpi_limit = phba->sli4_hba.next_rpi;
15019
15020 /*
15021 * Fetch the next logical rpi. Because this index is logical,
15022 * the driver starts at 0 each time.
15023 */
15024 spin_lock_irq(&phba->hbalock);
15025 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15026 if (rpi >= rpi_limit)
15027 rpi = LPFC_RPI_ALLOC_ERROR;
15028 else {
15029 set_bit(rpi, phba->sli4_hba.rpi_bmask);
15030 phba->sli4_hba.max_cfg_param.rpi_used++;
15031 phba->sli4_hba.rpi_count++;
15032 }
15033
15034 /*
15035 * Don't try to allocate more rpi header regions if the device limit
15036 * has been exhausted.
15037 */
15038 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15039 (phba->sli4_hba.rpi_count >= max_rpi)) {
15040 spin_unlock_irq(&phba->hbalock);
15041 return rpi;
15042 }
15043
15044 /*
15045 * RPI header postings are not required for SLI4 ports capable of
15046 * extents.
15047 */
15048 if (!phba->sli4_hba.rpi_hdrs_in_use) {
15049 spin_unlock_irq(&phba->hbalock);
15050 return rpi;
15051 }
15052
15053 /*
15054 * If the driver is running low on rpi resources, allocate another
15055 * page now. Note that the next_rpi value is used because
15056 * it represents how many are actually in use whereas max_rpi notes
15057 * the maximum number supported by the device.
15058 */
15059 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15060 spin_unlock_irq(&phba->hbalock);
15061 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15062 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15063 if (!rpi_hdr) {
15064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15065 "2002 Error Could not grow rpi "
15066 "count\n");
15067 } else {
15068 lrpi = rpi_hdr->start_rpi;
15069 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15070 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15071 }
15072 }
15073
15074 return rpi;
15075 }
15076
15077 /**
15078 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
15079 * @phba: pointer to lpfc hba data structure.
15080 *
15081 * This routine is invoked to release an rpi to the pool of
15082 * available rpis maintained by the driver.
15083 **/
15084 void
15085 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15086 {
15087 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15088 phba->sli4_hba.rpi_count--;
15089 phba->sli4_hba.max_cfg_param.rpi_used--;
15090 }
15091 }
15092
15093 /**
15094 * lpfc_sli4_free_rpi - Release an rpi for reuse.
15095 * @phba: pointer to lpfc hba data structure.
15096 *
15097 * This routine is invoked to release an rpi to the pool of
15098 * available rpis maintained by the driver.
15099 **/
15100 void
15101 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15102 {
15103 spin_lock_irq(&phba->hbalock);
15104 __lpfc_sli4_free_rpi(phba, rpi);
15105 spin_unlock_irq(&phba->hbalock);
15106 }
15107
15108 /**
15109 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
15110 * @phba: pointer to lpfc hba data structure.
15111 *
15112 * This routine is invoked to free the memory that tracks rpi
15113 * availability: the rpi bitmask and the rpi id array.
15114 **/
15115 void
15116 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
15117 {
15118 kfree(phba->sli4_hba.rpi_bmask);
15119 kfree(phba->sli4_hba.rpi_ids);
15120 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
15121 }
15122
15123 /**
15124 * lpfc_sli4_resume_rpi - Resume an rpi for a remote node
15125 * @ndlp: pointer to the remote node whose rpi is to be resumed.
15126 *
15127 * This routine is invoked to issue a RESUME_RPI mailbox command to the port
15128 * to resume traffic on the rpi associated with the given remote node.
15129 **/
15130 int
15131 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
15132 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
15133 {
15134 LPFC_MBOXQ_t *mboxq;
15135 struct lpfc_hba *phba = ndlp->phba;
15136 int rc;
15137
15138 /* The port is notified of the header region via a mailbox command. */
15139 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15140 if (!mboxq)
15141 return -ENOMEM;
15142
15143 /* Post all rpi memory regions to the port. */
15144 lpfc_resume_rpi(mboxq, ndlp);
15145 if (cmpl) {
15146 mboxq->mbox_cmpl = cmpl;
15147 mboxq->context1 = arg;
15148 mboxq->context2 = ndlp;
15149 } else
15150 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15151 mboxq->vport = ndlp->vport;
15152 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15153 if (rc == MBX_NOT_FINISHED) {
15154 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15155 "2010 Resume RPI Mailbox failed "
15156 "status %d, mbxStatus x%x\n", rc,
15157 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15158 mempool_free(mboxq, phba->mbox_mem_pool);
15159 return -EIO;
15160 }
15161 return 0;
15162 }
15163
15164 /**
15165 * lpfc_sli4_init_vpi - Initialize a vpi with the port
15166 * @vport: Pointer to the vport for which the vpi is being initialized
15167 *
15168 * This routine is invoked to activate a vpi with the port.
15169 *
15170 * Returns:
15171 * 0 success
15172 * -Evalue otherwise
15173 **/
15174 int
15175 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
15176 {
15177 LPFC_MBOXQ_t *mboxq;
15178 int rc = 0;
15179 int retval = MBX_SUCCESS;
15180 uint32_t mbox_tmo;
15181 struct lpfc_hba *phba = vport->phba;
15182 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15183 if (!mboxq)
15184 return -ENOMEM;
15185 lpfc_init_vpi(phba, mboxq, vport->vpi);
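	/* Issue INIT_VPI synchronously and wait up to the mailbox timeout */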
15186 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
15187 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
15188 if (rc != MBX_SUCCESS) {
15189 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
15190 "2022 INIT VPI Mailbox failed "
15191 "status %d, mbxStatus x%x\n", rc,
15192 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
15193 retval = -EIO;
15194 }
15195 if (rc != MBX_TIMEOUT)
15196 mempool_free(mboxq, vport->phba->mbox_mem_pool);
15197
15198 return retval;
15199 }
15200
15201 /**
15202 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
15203 * @phba: pointer to lpfc hba data structure.
15204 * @mboxq: Pointer to mailbox object.
15205 *
15206 * This routine is invoked to manually add a single FCF record. The caller
15207 * must pass a completely initialized FCF_Record. This routine takes
15208 * care of the nonembedded mailbox operations.
15209 **/
15210 static void
15211 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
15212 {
15213 void *virt_addr;
15214 union lpfc_sli4_cfg_shdr *shdr;
15215 uint32_t shdr_status, shdr_add_status;
15216
15217 virt_addr = mboxq->sge_array->addr[0];
15218 /* The IOCTL status is embedded in the mailbox subheader. */
15219 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
15220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15222
15223 if ((shdr_status || shdr_add_status) &&
15224 (shdr_status != STATUS_FCF_IN_USE))
15225 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15226 "2558 ADD_FCF_RECORD mailbox failed with "
15227 "status x%x add_status x%x\n",
15228 shdr_status, shdr_add_status);
15229
15230 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15231 }
15232
15233 /**
15234 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
15235 * @phba: pointer to lpfc hba data structure.
15236 * @fcf_record: pointer to the initialized fcf record to add.
15237 *
15238 * This routine is invoked to manually add a single FCF record. The caller
15239 * must pass a completely initialized FCF_Record. This routine takes
15240 * care of the nonembedded mailbox operations.
15241 **/
15242 int
15243 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
15244 {
15245 int rc = 0;
15246 LPFC_MBOXQ_t *mboxq;
15247 uint8_t *bytep;
15248 void *virt_addr;
15249 dma_addr_t phys_addr;
15250 struct lpfc_mbx_sge sge;
15251 uint32_t alloc_len, req_len;
15252 uint32_t fcfindex;
15253
15254 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15255 if (!mboxq) {
15256 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15257 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
15258 return -ENOMEM;
15259 }
15260
15261 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
15262 sizeof(uint32_t);
15263
15264 /* Allocate DMA memory and set up the non-embedded mailbox command */
15265 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15266 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
15267 req_len, LPFC_SLI4_MBX_NEMBED);
15268 if (alloc_len < req_len) {
15269 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15270 "2523 Allocated DMA memory size (x%x) is "
15271 "less than the requested DMA memory "
15272 "size (x%x)\n", alloc_len, req_len);
15273 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15274 return -ENOMEM;
15275 }
15276
15277 /*
15278 * Get the first SGE entry from the non-embedded DMA memory. This
15279 * routine only uses a single SGE.
15280 */
15281 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
15282 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
15283 virt_addr = mboxq->sge_array->addr[0];
15284 /*
15285 * Configure the FCF record for FCFI 0. This is the driver's
15286 * hardcoded default and gets used in nonFIP mode.
15287 */
15288 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
15289 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
15290 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
15291
15292 /*
15293 * Copy the fcf_index and the FCF Record Data. The data starts after
15294 * the FCoE header plus word10. The data copy needs to be endian
15295 * correct.
15296 */
15297 bytep += sizeof(uint32_t);
15298 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
15299 mboxq->vport = phba->pport;
15300 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
15301 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15302 if (rc == MBX_NOT_FINISHED) {
15303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15304 "2515 ADD_FCF_RECORD mailbox failed with "
15305 "status 0x%x\n", rc);
15306 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15307 rc = -EIO;
15308 } else
15309 rc = 0;
15310
15311 return rc;
15312 }
15313
15314 /**
15315 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
15316 * @phba: pointer to lpfc hba data structure.
15317 * @fcf_record: pointer to the fcf record to write the default data.
15318 * @fcf_index: FCF table entry index.
15319 *
15320 * This routine is invoked to build the driver's default FCF record. The
15321 * values used are hardcoded. This routine handles memory initialization.
15322 *
15323 **/
15324 void
15325 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
15326 struct fcf_record *fcf_record,
15327 uint16_t fcf_index)
15328 {
15329 memset(fcf_record, 0, sizeof(struct fcf_record));
15330 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
15331 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
15332 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
15333 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
15334 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
15335 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
15336 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
15337 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
15338 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
15339 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
15340 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
15341 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
15342 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
15343 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
15344 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
15345 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
15346 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
15347 /* Set the VLAN bit map */
15348 if (phba->valid_vlan) {
15349 fcf_record->vlan_bitmap[phba->vlan_id / 8]
15350 = 1 << (phba->vlan_id % 8);
15351 }
15352 }
15353
15354 /**
15355 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
15356 * @phba: pointer to lpfc hba data structure.
15357 * @fcf_index: FCF table entry offset.
15358 *
15359 * This routine is invoked to scan the entire FCF table by reading FCF
15360 * record and processing it one at a time starting from the @fcf_index
15361 * for initial FCF discovery or fast FCF failover rediscovery.
15362 *
15363 * Return 0 if the mailbox command is submitted successfully, non-zero
15364 * otherwise.
15365 **/
15366 int
15367 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15368 {
15369 int rc = 0, error;
15370 LPFC_MBOXQ_t *mboxq;
15371
15372 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
15373 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15374 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15375 if (!mboxq) {
15376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15377 "2000 Failed to allocate mbox for "
15378 "READ_FCF cmd\n");
15379 error = -ENOMEM;
15380 goto fail_fcf_scan;
15381 }
15382 /* Construct the read FCF record mailbox command */
15383 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15384 if (rc) {
15385 error = -EINVAL;
15386 goto fail_fcf_scan;
15387 }
15388 /* Issue the mailbox command asynchronously */
15389 mboxq->vport = phba->pport;
15390 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
15391
15392 spin_lock_irq(&phba->hbalock);
15393 phba->hba_flag |= FCF_TS_INPROG;
15394 spin_unlock_irq(&phba->hbalock);
15395
15396 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15397 if (rc == MBX_NOT_FINISHED)
15398 error = -EIO;
15399 else {
15400 /* Reset eligible FCF count for new scan */
15401 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
15402 phba->fcf.eligible_fcf_cnt = 0;
15403 error = 0;
15404 }
15405 fail_fcf_scan:
15406 if (error) {
15407 if (mboxq)
15408 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15409 /* FCF scan failed, clear FCF_TS_INPROG flag */
15410 spin_lock_irq(&phba->hbalock);
15411 phba->hba_flag &= ~FCF_TS_INPROG;
15412 spin_unlock_irq(&phba->hbalock);
15413 }
15414 return error;
15415 }
15416
15417 /**
15418 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
15419 * @phba: pointer to lpfc hba data structure.
15420 * @fcf_index: FCF table entry offset.
15421 *
15422 * This routine is invoked to read an FCF record indicated by @fcf_index
15423 * and to use it for FLOGI roundrobin FCF failover.
15424 *
15425 * Return 0 if the mailbox command is submitted successfully, non-zero
15426 * otherwise.
15427 **/
15428 int
15429 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15430 {
15431 int rc = 0, error;
15432 LPFC_MBOXQ_t *mboxq;
15433
15434 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15435 if (!mboxq) {
15436 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15437 "2763 Failed to allocate mbox for "
15438 "READ_FCF cmd\n");
15439 error = -ENOMEM;
15440 goto fail_fcf_read;
15441 }
15442 /* Construct the read FCF record mailbox command */
15443 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15444 if (rc) {
15445 error = -EINVAL;
15446 goto fail_fcf_read;
15447 }
15448 /* Issue the mailbox command asynchronously */
15449 mboxq->vport = phba->pport;
15450 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
15451 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15452 if (rc == MBX_NOT_FINISHED)
15453 error = -EIO;
15454 else
15455 error = 0;
15456
15457 fail_fcf_read:
15458 if (error && mboxq)
15459 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15460 return error;
15461 }
15462
15463 /**
15464 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
15465 * @phba: pointer to lpfc hba data structure.
15466 * @fcf_index: FCF table entry offset.
15467 *
15468 * This routine is invoked to read an FCF record indicated by @fcf_index to
15469 * determine whether it's eligible for FLOGI roundrobin failover list.
15470 *
15471 * Return 0 if the mailbox command is submitted successfully, non-zero
15472 * otherwise.
15473 **/
15474 int
15475 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15476 {
15477 int rc = 0, error;
15478 LPFC_MBOXQ_t *mboxq;
15479
15480 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15481 if (!mboxq) {
15482 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
15483 "2758 Failed to allocate mbox for "
15484 "READ_FCF cmd\n");
15485 error = -ENOMEM;
15486 goto fail_fcf_read;
15487 }
15488 /* Construct the read FCF record mailbox command */
15489 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
15490 if (rc) {
15491 error = -EINVAL;
15492 goto fail_fcf_read;
15493 }
15494 /* Issue the mailbox command asynchronously */
15495 mboxq->vport = phba->pport;
15496 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
15497 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
15498 if (rc == MBX_NOT_FINISHED)
15499 error = -EIO;
15500 else
15501 error = 0;
15502
15503 fail_fcf_read:
15504 if (error && mboxq)
15505 lpfc_sli4_mbox_cmd_free(phba, mboxq);
15506 return error;
15507 }
15508
15509 /**
15510 * lpfc_check_next_fcf_pri_level - Populate the rr_bmask with the next priority level
15511 * @phba: pointer to the lpfc_hba struct for this port.
15512 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
15513 * routine when the rr_bmask is empty. The FCF indices are put into the
15514 * rr_bmask based on their priority level. Starting from the highest priority
15515 * to the lowest. The most likely FCF candidate will be in the highest
15516 * priority group. When this routine is called it searches the fcf_pri list for
15517 * the next lowest priority group and repopulates the rr_bmask with only those
15518 * fcf_indexes.
15519 * returns:
15520 * 1=success 0=failure
15521 **/
15522 int
15523 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
15524 {
15525 uint16_t next_fcf_pri;
15526 uint16_t last_index;
15527 struct lpfc_fcf_pri *fcf_pri;
15528 int rc;
15529 int ret = 0;
15530
15531 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
15532 LPFC_SLI4_FCF_TBL_INDX_MAX);
15533 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15534 "3060 Last IDX %d\n", last_index);
15535
15536 /* Verify the priority list has 2 or more entries */
15537 spin_lock_irq(&phba->hbalock);
15538 if (list_empty(&phba->fcf.fcf_pri_list) ||
15539 list_is_singular(&phba->fcf.fcf_pri_list)) {
15540 spin_unlock_irq(&phba->hbalock);
15541 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15542 "3061 Last IDX %d\n", last_index);
15543 return 0; /* Empty rr list */
15544 }
15545 spin_unlock_irq(&phba->hbalock);
15546
15547 next_fcf_pri = 0;
15548 /*
15549 * Clear the rr_bmask and set all of the bits that are at this
15550 * priority.
15551 */
15552 memset(phba->fcf.fcf_rr_bmask, 0,
15553 sizeof(*phba->fcf.fcf_rr_bmask));
15554 spin_lock_irq(&phba->hbalock);
15555 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15556 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
15557 continue;
15558 /*
15559 * The first priority that has not failed FLOGI
15560 * will be the highest.
15561 */
15562 if (!next_fcf_pri)
15563 next_fcf_pri = fcf_pri->fcf_rec.priority;
15564 spin_unlock_irq(&phba->hbalock);
15565 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15566 rc = lpfc_sli4_fcf_rr_index_set(phba,
15567 fcf_pri->fcf_rec.fcf_index);
15568 if (rc)
15569 return 0;
15570 }
15571 spin_lock_irq(&phba->hbalock);
15572 }
15573 /*
15574 * if next_fcf_pri was not set above and the list is not empty then
15575 * we have failed flogis on all of them. So reset flogi failed
15576 * and start at the beginning.
15577 */
15578 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
15579 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15580 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
15581 /*
15582 * The first priority that has not failed FLOGI
15583 * will be the highest.
15584 */
15585 if (!next_fcf_pri)
15586 next_fcf_pri = fcf_pri->fcf_rec.priority;
15587 spin_unlock_irq(&phba->hbalock);
15588 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
15589 rc = lpfc_sli4_fcf_rr_index_set(phba,
15590 fcf_pri->fcf_rec.fcf_index);
15591 if (rc)
15592 return 0;
15593 }
15594 spin_lock_irq(&phba->hbalock);
15595 }
15596 } else
15597 ret = 1;
15598 spin_unlock_irq(&phba->hbalock);
15599
15600 return ret;
15601 }
15602 /**
15603 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
15604 * @phba: pointer to lpfc hba data structure.
15605 *
15606 * This routine is to get the next eligible FCF record index in a round
15607 * robin fashion. If the next eligible FCF record index equals to the
15608 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
15609 * shall be returned, otherwise, the next eligible FCF record's index
15610 * shall be returned.
15611 **/
15612 uint16_t
15613 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
15614 {
15615 uint16_t next_fcf_index;
15616
15617 initial_priority:
15618 /* Search start from next bit of currently registered FCF index */
15619 next_fcf_index = phba->fcf.current_rec.fcf_indx;
15620
15621 next_priority:
15622 /* Determine the next fcf index to check */
15623 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
15624 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15625 LPFC_SLI4_FCF_TBL_INDX_MAX,
15626 next_fcf_index);
15627
15628 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
15629 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15630 /*
15631 * If we have wrapped then we need to clear the bits that
15632 * have been tested so that we can detect when we should
15633 * change the priority level.
15634 */
15635 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15636 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
15637 }
15638
15639
15640 /* Check roundrobin failover list empty condition */
15641 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15642 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15643 /*
15644 * If the next fcf index is not found, check if there are lower
15645 * priority level fcf's in the fcf_priority list.
15646 * Set up the rr_bmask with all of the available fcf bits
15647 * at that level and continue the selection process.
15648 */
15649 if (lpfc_check_next_fcf_pri_level(phba))
15650 goto initial_priority;
15651 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15652 "2844 No roundrobin failover FCF available\n");
15653 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15654 return LPFC_FCOE_FCF_NEXT_NONE;
15655 else {
15656 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15657 "3063 Only FCF available idx %d, flag %x\n",
15658 next_fcf_index,
15659 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15660 return next_fcf_index;
15661 }
15662 }
15663
15664 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15665 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15666 LPFC_FCF_FLOGI_FAILED)
15667 goto next_priority;
15668
15669 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15670 "2845 Get next roundrobin failover FCF (x%x)\n",
15671 next_fcf_index);
15672
15673 return next_fcf_index;
15674 }
15675
15676 /**
15677 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15678 * @phba: pointer to lpfc hba data structure.
15679 *
15680 * This routine sets the FCF record index in to the eligible bmask for
15681 * roundrobin failover search. It checks to make sure that the index
15682 * does not go beyond the range of the driver allocated bmask dimension
15683 * before setting the bit.
15684 *
15685 * Returns 0 if the index bit is successfully set, otherwise, it returns
15686 * -EINVAL.
15687 **/
15688 int
15689 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15690 {
15691 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15692 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15693 "2610 FCF (x%x) reached driver's book "
15694 "keeping dimension:x%x\n",
15695 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15696 return -EINVAL;
15697 }
15698 /* Set the eligible FCF record index bmask */
15699 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15700
15701 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15702 "2790 Set FCF (x%x) to roundrobin FCF failover "
15703 "bmask\n", fcf_index);
15704
15705 return 0;
15706 }
15707
15708 /**
15709 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
15710 * @phba: pointer to lpfc hba data structure.
15711 *
15712 * This routine clears the FCF record index from the eligible bmask for
15713 * roundrobin failover search. It checks to make sure that the index
15714 * does not go beyond the range of the driver allocated bmask dimension
15715 * before clearing the bit.
15716 **/
15717 void
15718 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15719 {
15720 struct lpfc_fcf_pri *fcf_pri;
15721 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15722 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15723 "2762 FCF (x%x) reached driver's book "
15724 "keeping dimension:x%x\n",
15725 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15726 return;
15727 }
15728 /* Clear the eligible FCF record index bmask */
15729 spin_lock_irq(&phba->hbalock);
15730 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15731 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15732 list_del_init(&fcf_pri->list);
15733 break;
15734 }
15735 }
15736 spin_unlock_irq(&phba->hbalock);
15737 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15738
15739 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15740 "2791 Clear FCF (x%x) from roundrobin failover "
15741 "bmask\n", fcf_index);
15742 }
15743
15744 /**
15745 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15746 * @phba: pointer to lpfc hba data structure.
15747 *
15748 * This routine is the completion routine for the rediscover FCF table mailbox
15749 * command. If the mailbox command returned failure, it will try to stop the
15750 * FCF rediscover wait timer.
15751 **/
15752 void
15753 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15754 {
15755 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15756 uint32_t shdr_status, shdr_add_status;
15757
15758 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15759
15760 shdr_status = bf_get(lpfc_mbox_hdr_status,
15761 &redisc_fcf->header.cfg_shdr.response);
15762 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15763 &redisc_fcf->header.cfg_shdr.response);
15764 if (shdr_status || shdr_add_status) {
15765 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
15766 "2746 Requesting for FCF rediscovery failed "
15767 "status x%x add_status x%x\n",
15768 shdr_status, shdr_add_status);
15769 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
15770 spin_lock_irq(&phba->hbalock);
15771 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
15772 spin_unlock_irq(&phba->hbalock);
15773 /*
15774 * CVL event triggered FCF rediscover request failed,
15775 * last resort to re-try current registered FCF entry.
15776 */
15777 lpfc_retry_pport_discovery(phba);
15778 } else {
15779 spin_lock_irq(&phba->hbalock);
15780 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
15781 spin_unlock_irq(&phba->hbalock);
15782 /*
15783 * DEAD FCF event triggered FCF rediscover request
15784 * failed, last resort to fail over as a link down
15785 * to FCF registration.
15786 */
15787 lpfc_sli4_fcf_dead_failthrough(phba);
15788 }
15789 } else {
15790 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
15791 "2775 Start FCF rediscover quiescent timer\n");
15792 /*
15793 * Start FCF rediscovery wait timer for pending FCF
15794 * before rescan FCF record table.
15795 */
15796 lpfc_fcf_redisc_wait_start_timer(phba);
15797 }
15798
15799 mempool_free(mbox, phba->mbox_mem_pool);
15800 }
15801
15802 /**
15803 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
15804 * @phba: pointer to lpfc hba data structure.
15805 *
15806 * This routine is invoked to request for rediscovery of the entire FCF table
15807 * by the port.
15808 **/
15809 int
15810 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15811 {
15812 LPFC_MBOXQ_t *mbox;
15813 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15814 int rc, length;
15815
15816 /* Cancel retry delay timers to all vports before FCF rediscover */
15817 lpfc_cancel_all_vport_retry_delay_timer(phba);
15818
15819 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15820 if (!mbox) {
15821 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15822 "2745 Failed to allocate mbox for "
15823 "requesting FCF rediscover.\n");
15824 return -ENOMEM;
15825 }
15826
15827 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15828 sizeof(struct lpfc_sli4_cfg_mhdr));
15829 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15830 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15831 length, LPFC_SLI4_MBX_EMBED);
15832
15833 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15834 /* Set count to 0 for invalidating the entire FCF database */
15835 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15836
15837 /* Issue the mailbox command asynchronously */
15838 mbox->vport = phba->pport;
15839 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15840 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15841
15842 if (rc == MBX_NOT_FINISHED) {
15843 mempool_free(mbox, phba->mbox_mem_pool);
15844 return -EIO;
15845 }
15846 return 0;
15847 }
15848
15849 /**
15850 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15851 * @phba: pointer to lpfc hba data structure.
15852 *
15853 * This function is the failover routine as a last resort to the FCF DEAD
15854 * event when driver failed to perform fast FCF failover.
15855 **/
15856 void
15857 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15858 {
15859 uint32_t link_state;
15860
15861 /*
15862 * Last resort as FCF DEAD event failover will treat this as
15863 * a link down, but save the link state because we don't want
15864 * it to be changed to Link Down unless it is already down.
15865 */
15866 link_state = phba->link_state;
15867 lpfc_linkdown(phba);
15868 phba->link_state = link_state;
15869
15870 /* Unregister FCF if no devices connected to it */
15871 lpfc_unregister_unused_fcf(phba);
15872 }
15873
15874 /**
15875 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
15876 * @phba: pointer to lpfc hba data structure.
15877 * @rgn23_data: pointer to configure region 23 data.
15878 *
15879 * This function gets the SLI3 port config region 23 data through the memory
15880 * dump mailbox command. When it successfully retrieves data, the size of the
15881 * data will be returned; otherwise, 0 will be returned.
15882 **/
15883 static uint32_t
15884 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15885 {
15886 LPFC_MBOXQ_t *pmb = NULL;
15887 MAILBOX_t *mb;
15888 uint32_t offset = 0;
15889 int rc;
15890
15891 if (!rgn23_data)
15892 return 0;
15893
15894 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15895 if (!pmb) {
15896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15897 "2600 failed to allocate mailbox memory\n");
15898 return 0;
15899 }
15900 mb = &pmb->u.mb;
15901
15902 do {
15903 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15904 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15905
15906 if (rc != MBX_SUCCESS) {
15907 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15908 "2601 failed to read config "
15909 "region 23, rc 0x%x Status 0x%x\n",
15910 rc, mb->mbxStatus);
15911 mb->un.varDmp.word_cnt = 0;
15912 }
15913 /*
15914 * The dump mem command may return a zero word count when finished,
15915 * or we got a mailbox error; either way we are done.
15916 */
15917 if (mb->un.varDmp.word_cnt == 0)
15918 break;
15919 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15920 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15921
15922 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
15923 rgn23_data + offset,
15924 mb->un.varDmp.word_cnt);
15925 offset += mb->un.varDmp.word_cnt;
15926 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15927
15928 mempool_free(pmb, phba->mbox_mem_pool);
15929 return offset;
15930 }
15931
15932 /**
15933 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15934 * @phba: pointer to lpfc hba data structure.
15935 * @rgn23_data: pointer to configure region 23 data.
15936 *
15937 * This function gets the SLI4 port config region 23 data through the memory
15938 * dump mailbox command. When it successfully retrieves data, the size of the
15939 * data will be returned; otherwise, 0 will be returned.
15940 **/
15941 static uint32_t
15942 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15943 {
15944 LPFC_MBOXQ_t *mboxq = NULL;
15945 struct lpfc_dmabuf *mp = NULL;
15946 struct lpfc_mqe *mqe;
15947 uint32_t data_length = 0;
15948 int rc;
15949
15950 if (!rgn23_data)
15951 return 0;
15952
15953 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15954 if (!mboxq) {
15955 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15956 "3105 failed to allocate mailbox memory\n");
15957 return 0;
15958 }
15959
15960 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15961 goto out;
15962 mqe = &mboxq->u.mqe;
15963 mp = (struct lpfc_dmabuf *) mboxq->context1;
15964 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15965 if (rc)
15966 goto out;
15967 data_length = mqe->un.mb_words[5];
15968 if (data_length == 0)
15969 goto out;
15970 if (data_length > DMP_RGN23_SIZE) {
15971 data_length = 0;
15972 goto out;
15973 }
15974 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15975 out:
15976 mempool_free(mboxq, phba->mbox_mem_pool);
15977 if (mp) {
15978 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15979 kfree(mp);
15980 }
15981 return data_length;
15982 }
15983
15984 /**
15985 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15986 * @phba: pointer to lpfc hba data structure.
15987 *
15988 * This function reads region 23 and parses the TLVs for the port status
15989 * to decide if the user disabled the port. If the TLV indicates the
15990 * port is disabled, the hba_flag is set accordingly.
15991 **/
15992 void
15993 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15994 {
15995 uint8_t *rgn23_data = NULL;
15996 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15997 uint32_t offset = 0;
15998
15999 /* Get adapter Region 23 data */
16000 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
16001 if (!rgn23_data)
16002 goto out;
16003
16004 if (phba->sli_rev < LPFC_SLI_REV4)
16005 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
16006 else {
16007 if_type = bf_get(lpfc_sli_intf_if_type,
16008 &phba->sli4_hba.sli_intf);
16009 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
16010 goto out;
16011 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
16012 }
16013
16014 if (!data_size)
16015 goto out;
16016
16017 /* Check the region signature first */
16018 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
16019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16020 "2619 Config region 23 has bad signature\n");
16021 goto out;
16022 }
16023 offset += 4;
16024
16025 /* Check the data structure version */
16026 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
16027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16028 "2620 Config region 23 has bad version\n");
16029 goto out;
16030 }
16031 offset += 4;
16032
16033 /* Parse TLV entries in the region */
16034 while (offset < data_size) {
16035 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
16036 break;
16037 /*
16038 * If the TLV is not a driver specific TLV or the driver id
16039 * is not the Linux driver id, skip the record.
16040 */
16041 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
16042 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
16043 (rgn23_data[offset + 3] != 0)) {
16044 offset += rgn23_data[offset + 1] * 4 + 4;
16045 continue;
16046 }
16047
16048 /* Driver found a driver specific TLV in the config region */
16049 sub_tlv_len = rgn23_data[offset + 1] * 4;
16050 offset += 4;
16051 tlv_offset = 0;
16052
16053 /*
16054 * Search for configured port state sub-TLV.
16055 */
16056 while ((offset < data_size) &&
16057 (tlv_offset < sub_tlv_len)) {
16058 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16059 offset += 4;
16060 tlv_offset += 4;
16061 break;
16062 }
16063 if (rgn23_data[offset] != PORT_STE_TYPE) {
16064 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16065 offset += rgn23_data[offset + 1] * 4 + 4;
16066 continue;
16067 }
16068
16069 /* This HBA contains PORT_STE configured */
16070 if (!rgn23_data[offset + 2])
16071 phba->hba_flag |= LINK_DISABLED;
16072
16073 goto out;
16074 }
16075 }
16076
16077 out:
16078 kfree(rgn23_data);
16079 return;
16080 }
16081
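/*
 * Editorial summary of the config region 23 layout as inferred from the
 * parsing in lpfc_sli_read_link_ste() above (field meanings are deduced from
 * the code, not taken from a hardware specification):
 *
 *	bytes 0-3 : region signature (LPFC_REGION23_SIGNATURE)
 *	bytes 4-7 : data structure version (LPFC_REGION23_VERSION)
 *	bytes 8.. : TLV records until LPFC_REGION23_LAST_REC, each with a
 *		    4-byte header:
 *			byte 0 : record type
 *			byte 1 : record data length in 32-bit words
 *			byte 2 : driver id (LINUX_DRIVER_ID) for the driver
 *				 specific TLV, or the port state for the
 *				 PORT_STE_TYPE sub-TLV (0 == link disabled)
 *			byte 3 : reserved, must be 0 for the driver TLV
 */
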
16082 /**
16083 * lpfc_wr_object - write an object to the firmware
16084 * @phba: HBA structure that indicates port to create a queue on.
16085 * @dmabuf_list: list of dmabufs to write to the port.
16086 * @size: the total byte value of the objects to write to the port.
16087 * @offset: the current offset to be used to start the transfer.
16088 *
16089 * This routine will create a wr_object mailbox command to send to the port.
16090 * The mailbox command will be constructed using the dma buffers described in
16091 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
16092 * BDEs as the embedded mailbox can support. The @offset variable will be
16093 * used to indicate the starting offset of the transfer and will also return
16094 * the offset after the write object mailbox has completed. @size is used to
16095 * determine the end of the object and whether the eof bit should be set.
16096 *
16097 * Return 0 if successful, in which case @offset will contain the new offset
16098 * to use for the next write.
16099 * Return a negative value for error cases.
16100 **/
16101 int
16102 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
16103 uint32_t size, uint32_t *offset)
16104 {
16105 struct lpfc_mbx_wr_object *wr_object;
16106 LPFC_MBOXQ_t *mbox;
16107 int rc = 0, i = 0;
16108 uint32_t shdr_status, shdr_add_status;
16109 uint32_t mbox_tmo;
16110 union lpfc_sli4_cfg_shdr *shdr;
16111 struct lpfc_dmabuf *dmabuf;
16112 uint32_t written = 0;
16113
16114 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16115 if (!mbox)
16116 return -ENOMEM;
16117
16118 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16119 LPFC_MBOX_OPCODE_WRITE_OBJECT,
16120 sizeof(struct lpfc_mbx_wr_object) -
16121 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16122
16123 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
16124 wr_object->u.request.write_offset = *offset;
16125 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
16126 wr_object->u.request.object_name[0] =
16127 cpu_to_le32(wr_object->u.request.object_name[0]);
16128 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
16129 list_for_each_entry(dmabuf, dmabuf_list, list) {
16130 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
16131 break;
16132 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
16133 wr_object->u.request.bde[i].addrHigh =
16134 putPaddrHigh(dmabuf->phys);
16135 if (written + SLI4_PAGE_SIZE >= size) {
16136 wr_object->u.request.bde[i].tus.f.bdeSize =
16137 (size - written);
16138 written += (size - written);
16139 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
16140 } else {
16141 wr_object->u.request.bde[i].tus.f.bdeSize =
16142 SLI4_PAGE_SIZE;
16143 written += SLI4_PAGE_SIZE;
16144 }
16145 i++;
16146 }
16147 wr_object->u.request.bde_count = i;
16148 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
16149 if (!phba->sli4_hba.intr_enable)
16150 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16151 else {
16152 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16153 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16154 }
16155 /* The IOCTL status is embedded in the mailbox subheader. */
16156 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
16157 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16158 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16159 if (rc != MBX_TIMEOUT)
16160 mempool_free(mbox, phba->mbox_mem_pool);
16161 if (shdr_status || shdr_add_status || rc) {
16162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16163 "3025 Write Object mailbox failed with "
16164 "status x%x add_status x%x, mbx status x%x\n",
16165 shdr_status, shdr_add_status, rc);
16166 rc = -ENXIO;
16167 } else
16168 *offset += wr_object->u.response.actual_write_length;
16169 return rc;
16170 }
16171
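/*
 * Editorial sketch (assumption, not the driver's actual firmware download
 * path): lpfc_wr_object() is built to be called repeatedly, with @offset
 * carrying the running position between calls. The eof test above suggests
 * @size covers the bytes still to be written from @offset onward, so a
 * caller with an object of img_size bytes and a prepared dmabuf_list might
 * loop roughly like this (img_size and dmabuf_list are illustrative names
 * only):
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (!rc && offset < img_size) {
 *		// refill dmabuf_list with object data starting at offset
 *		rc = lpfc_wr_object(phba, &dmabuf_list, img_size - offset,
 *				    &offset);
 *	}
 */
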
16172 /**
16173 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
16174 * @vport: pointer to vport data structure.
16175 *
16176 * This function iterates through the mailbox queue and cleans up all
16177 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. It
16178 * is called when the driver wants to restart discovery of the vport due
16179 * to a Clear Virtual Link event.
16180 **/
16181 void
16182 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
16183 {
16184 struct lpfc_hba *phba = vport->phba;
16185 LPFC_MBOXQ_t *mb, *nextmb;
16186 struct lpfc_dmabuf *mp;
16187 struct lpfc_nodelist *ndlp;
16188 struct lpfc_nodelist *act_mbx_ndlp = NULL;
16189 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
16190 LIST_HEAD(mbox_cmd_list);
16191 uint8_t restart_loop;
16192
16193 /* Clean up internally queued mailbox commands with the vport */
16194 spin_lock_irq(&phba->hbalock);
16195 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
16196 if (mb->vport != vport)
16197 continue;
16198
16199 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16200 (mb->u.mb.mbxCommand != MBX_REG_VPI))
16201 continue;
16202
16203 list_del(&mb->list);
16204 list_add_tail(&mb->list, &mbox_cmd_list);
16205 }
16206 /* Clean up active mailbox command with the vport */
16207 mb = phba->sli.mbox_active;
16208 if (mb && (mb->vport == vport)) {
16209 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
16210 (mb->u.mb.mbxCommand == MBX_REG_VPI))
16211 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16212 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16213 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
16214 /* Put reference count for delayed processing */
16215 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
16216 /* Unregister the RPI when mailbox complete */
16217 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16218 }
16219 }
16220 /* Cleanup any mailbox completions which are not yet processed */
16221 do {
16222 restart_loop = 0;
16223 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
16224 /*
16225 * If this mailbox is already processed or it is
16226 * for another vport, ignore it.
16227 */
16228 if ((mb->vport != vport) ||
16229 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
16230 continue;
16231
16232 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
16233 (mb->u.mb.mbxCommand != MBX_REG_VPI))
16234 continue;
16235
16236 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16237 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16238 ndlp = (struct lpfc_nodelist *)mb->context2;
16239 /* Unregister the RPI when mailbox complete */
16240 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
16241 restart_loop = 1;
16242 spin_unlock_irq(&phba->hbalock);
16243 spin_lock(shost->host_lock);
16244 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16245 spin_unlock(shost->host_lock);
16246 spin_lock_irq(&phba->hbalock);
16247 break;
16248 }
16249 }
16250 } while (restart_loop);
16251
16252 spin_unlock_irq(&phba->hbalock);
16253
16254 /* Release the cleaned-up mailbox commands */
16255 while (!list_empty(&mbox_cmd_list)) {
16256 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
16257 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
16258 mp = (struct lpfc_dmabuf *) (mb->context1);
16259 if (mp) {
16260 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
16261 kfree(mp);
16262 }
16263 ndlp = (struct lpfc_nodelist *) mb->context2;
16264 mb->context2 = NULL;
16265 if (ndlp) {
16266 spin_lock(shost->host_lock);
16267 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16268 spin_unlock(shost->host_lock);
16269 lpfc_nlp_put(ndlp);
16270 }
16271 }
16272 mempool_free(mb, phba->mbox_mem_pool);
16273 }
16274
16275 /* Release the ndlp with the cleaned-up active mailbox command */
16276 if (act_mbx_ndlp) {
16277 spin_lock(shost->host_lock);
16278 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
16279 spin_unlock(shost->host_lock);
16280 lpfc_nlp_put(act_mbx_ndlp);
16281 }
16282 }
16283
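/*
 * Editorial sketch (assumption, not the actual CVL handler): a Clear Virtual
 * Link handler that wants to restart discovery on a vport would flush the
 * stale registration mailboxes first, roughly:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	// ...then unregister the vport's RPIs/VPI and restart discovery
 */
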
16284 /**
16285 * lpfc_drain_txq - Drain the txq
16286 * @phba: Pointer to HBA context object.
16287 *
16288 * This function attempts to submit the IOCBs on the txq
16289 * to the adapter. For SLI4 adapters, the txq contains
16290 * ELS IOCBs that have been deferred because there
16291 * are no SGLs available. This congestion can occur with
16292 * large vport counts during node discovery.
16293 **/
16294
16295 uint32_t
16296 lpfc_drain_txq(struct lpfc_hba *phba)
16297 {
16298 LIST_HEAD(completions);
16299 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
16300 struct lpfc_iocbq *piocbq = NULL;
16301 unsigned long iflags = 0;
16302 char *fail_msg = NULL;
16303 struct lpfc_sglq *sglq;
16304 union lpfc_wqe wqe;
16305 int txq_cnt = 0;
16306
16307 spin_lock_irqsave(&phba->hbalock, iflags);
16308 list_for_each_entry(piocbq, &pring->txq, list) {
16309 txq_cnt++;
16310 }
16311
16312 if (txq_cnt > pring->txq_max)
16313 pring->txq_max = txq_cnt;
16314
16315 spin_unlock_irqrestore(&phba->hbalock, iflags);
16316
16317 while (!list_empty(&pring->txq)) {
16318 spin_lock_irqsave(&phba->hbalock, iflags);
16319
16320 piocbq = lpfc_sli_ringtx_get(phba, pring);
16321 if (!piocbq) {
16322 spin_unlock_irqrestore(&phba->hbalock, iflags);
16323 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16324 "2823 txq empty and txq_cnt is %d\n ",
16325 txq_cnt);
16326 break;
16327 }
16328 sglq = __lpfc_sli_get_sglq(phba, piocbq);
16329 if (!sglq) {
16330 __lpfc_sli_ringtx_put(phba, pring, piocbq);
16331 spin_unlock_irqrestore(&phba->hbalock, iflags);
16332 break;
16333 }
16334 txq_cnt--;
16335
16336 /* The xri and iocb resources are secured,
16337 * attempt to issue the request
16338 */
16339 piocbq->sli4_lxritag = sglq->sli4_lxritag;
16340 piocbq->sli4_xritag = sglq->sli4_xritag;
16341 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
16342 fail_msg = "to convert bpl to sgl";
16343 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
16344 fail_msg = "to convert iocb to wqe";
16345 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
16346 fail_msg = " - Wq is full";
16347 else
16348 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
16349
16350 if (fail_msg) {
16351 /* Failed means we can't issue and need to cancel */
16352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16353 "2822 IOCB failed %s iotag 0x%x "
16354 "xri 0x%x\n",
16355 fail_msg,
16356 piocbq->iotag, piocbq->sli4_xritag);
16357 list_add_tail(&piocbq->list, &completions);
16358 }
16359 spin_unlock_irqrestore(&phba->hbalock, iflags);
16360 }
16361
16362 /* Cancel all the IOCBs that cannot be issued */
16363 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
16364 IOERR_SLI_ABORTED);
16365
16366 return txq_cnt;
16367 }
16368
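/*
 * Editorial note on lpfc_drain_txq(): the value returned is roughly the
 * number of IOCBs left deferred on the txq when the routine gives up
 * (typically for lack of SGLs). A hypothetical caller that has just returned
 * SGL/XRI resources to the pool might retry the deferred ELS traffic and log
 * any remainder like this (illustrative only, "xxxx" is a placeholder log
 * number, not an actual call site):
 *
 *	if (lpfc_drain_txq(phba))
 *		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 *				"xxxx ELS IOCBs still deferred on txq\n");
 */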