1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55
56 #define LPFC_DEF_DEVLOSS_TMO 30
57 #define LPFC_MIN_DEVLOSS_TMO 1
58 #define LPFC_MAX_DEVLOSS_TMO 255
59
60 #define LPFC_DEF_MRQ_POST 512
61 #define LPFC_MIN_MRQ_POST 512
62 #define LPFC_MAX_MRQ_POST 2048
63
64 /*
65 * Write key size should be multiple of 4. If write key is changed
66 * make sure that library write key is also changed.
67 */
68 #define LPFC_REG_WRITE_KEY_SIZE 4
69 #define LPFC_REG_WRITE_KEY "EMLX"
70
71 const char *const trunk_errmsg[] = { /* map errcode */
72 "", /* There is no such error code at index 0*/
73 "link negotiated speed does not match existing"
74 " trunk - link was \"low\" speed",
75 "link negotiated speed does not match"
76 " existing trunk - link was \"middle\" speed",
77 "link negotiated speed does not match existing"
78 " trunk - link was \"high\" speed",
79 "Attached to non-trunking port - F_Port",
80 "Attached to non-trunking port - N_Port",
81 "FLOGI response timeout",
82 "non-FLOGI frame received",
83 "Invalid FLOGI response",
84 "Trunking initialization protocol",
85 "Trunk peer device mismatch",
86 };
87
88 /**
89  * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
90 * @incr: integer to convert.
91 * @hdw: ascii string holding converted integer plus a string terminator.
92 *
93 * Description:
94  * JEDEC: Joint Electron Device Engineering Council.
95  * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
96  * character string. The string is then terminated with a NULL in byte 9.
97  * Hex 0-9 becomes ascii '0' to '9'.
98  * Hex a-f becomes ascii 'a' to 'f'.
99 *
100 * Notes:
101 * Coded for 32 bit integers only.
102 **/
103 static void
104 lpfc_jedec_to_ascii(int incr, char hdw[])
105 {
106 int i, j;
107 for (i = 0; i < 8; i++) {
108 j = (incr & 0xf);
109 if (j <= 9)
110 hdw[7 - i] = 0x30 + j;
111 else
112 hdw[7 - i] = 0x61 + j - 10;
113 incr = (incr >> 4);
114 }
115 hdw[8] = 0;
116 return;
117 }
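
/*
 * Worked example (editorial note, derived from the loop above): for
 * incr = 0x12345abc the string is filled from the least significant
 * nibble at hdw[7] back to hdw[0], giving hdw = "12345abc"; nibbles
 * 0x0-0x9 map to '0'-'9' and 0xa-0xf map to 'a'-'f'.
 */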
118
119 /**
120 * lpfc_drvr_version_show - Return the Emulex driver string with version number
121 * @dev: class unused variable.
122 * @attr: device attribute, not used.
123 * @buf: on return contains the module description text.
124 *
125 * Returns: size of formatted string.
126 **/
127 static ssize_t
128 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
129 char *buf)
130 {
131 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
132 }
133
134 /**
135 * lpfc_enable_fip_show - Return the fip mode of the HBA
136 * @dev: class unused variable.
137 * @attr: device attribute, not used.
138 * @buf: on return contains the module description text.
139 *
140 * Returns: size of formatted string.
141 **/
142 static ssize_t
143 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
144 char *buf)
145 {
146 struct Scsi_Host *shost = class_to_shost(dev);
147 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
148 struct lpfc_hba *phba = vport->phba;
149
150 if (phba->hba_flag & HBA_FIP_SUPPORT)
151 return scnprintf(buf, PAGE_SIZE, "1\n");
152 else
153 return scnprintf(buf, PAGE_SIZE, "0\n");
154 }
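
/*
 * Illustrative read from userspace (a sketch; the sysfs path and host
 * number are assumptions, since the attribute is registered by the
 * DEVICE_ATTR macros elsewhere in this file):
 *
 *   cat /sys/class/scsi_host/host0/lpfc_enable_fip
 *
 * prints "1" when the HBA_FIP_SUPPORT flag is set and "0" otherwise.
 */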
155
156 static ssize_t
157 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
158 char *buf)
159 {
160 struct Scsi_Host *shost = class_to_shost(dev);
161 struct lpfc_vport *vport = shost_priv(shost);
162 struct lpfc_hba *phba = vport->phba;
163 struct lpfc_nvmet_tgtport *tgtp;
164 struct nvme_fc_local_port *localport;
165 struct lpfc_nvme_lport *lport;
166 struct lpfc_nvme_rport *rport;
167 struct lpfc_nodelist *ndlp;
168 struct nvme_fc_remote_port *nrport;
169 struct lpfc_fc4_ctrl_stat *cstat;
170 uint64_t data1, data2, data3;
171 uint64_t totin, totout, tot;
172 char *statep;
173 int i;
174 int len = 0;
175 char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
176
177 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
178 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
179 return len;
180 }
181 if (phba->nvmet_support) {
182 if (!phba->targetport) {
183 len = scnprintf(buf, PAGE_SIZE,
184 "NVME Target: x%llx is not allocated\n",
185 wwn_to_u64(vport->fc_portname.u.wwn));
186 return len;
187 }
188 /* Port state is only one of two values for now. */
189 if (phba->targetport->port_id)
190 statep = "REGISTERED";
191 else
192 statep = "INIT";
193 scnprintf(tmp, sizeof(tmp),
194 "NVME Target Enabled State %s\n",
195 statep);
196 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
197 goto buffer_done;
198
199 scnprintf(tmp, sizeof(tmp),
200 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
201 "NVME Target: lpfc",
202 phba->brd_no,
203 wwn_to_u64(vport->fc_portname.u.wwn),
204 wwn_to_u64(vport->fc_nodename.u.wwn),
205 phba->targetport->port_id);
206 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
207 goto buffer_done;
208
209 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
210 >= PAGE_SIZE)
211 goto buffer_done;
212
213 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
214 scnprintf(tmp, sizeof(tmp),
215 "LS: Rcv %08x Drop %08x Abort %08x\n",
216 atomic_read(&tgtp->rcv_ls_req_in),
217 atomic_read(&tgtp->rcv_ls_req_drop),
218 atomic_read(&tgtp->xmt_ls_abort));
219 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
220 goto buffer_done;
221
222 if (atomic_read(&tgtp->rcv_ls_req_in) !=
223 atomic_read(&tgtp->rcv_ls_req_out)) {
224 scnprintf(tmp, sizeof(tmp),
225 "Rcv LS: in %08x != out %08x\n",
226 atomic_read(&tgtp->rcv_ls_req_in),
227 atomic_read(&tgtp->rcv_ls_req_out));
228 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
229 goto buffer_done;
230 }
231
232 scnprintf(tmp, sizeof(tmp),
233 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
234 atomic_read(&tgtp->xmt_ls_rsp),
235 atomic_read(&tgtp->xmt_ls_drop),
236 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
238 goto buffer_done;
239
240 scnprintf(tmp, sizeof(tmp),
241 "LS: RSP Abort %08x xb %08x Err %08x\n",
242 atomic_read(&tgtp->xmt_ls_rsp_aborted),
243 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
244 atomic_read(&tgtp->xmt_ls_rsp_error));
245 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
246 goto buffer_done;
247
248 scnprintf(tmp, sizeof(tmp),
249 "FCP: Rcv %08x Defer %08x Release %08x "
250 "Drop %08x\n",
251 atomic_read(&tgtp->rcv_fcp_cmd_in),
252 atomic_read(&tgtp->rcv_fcp_cmd_defer),
253 atomic_read(&tgtp->xmt_fcp_release),
254 atomic_read(&tgtp->rcv_fcp_cmd_drop));
255 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
256 goto buffer_done;
257
258 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
259 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
260 scnprintf(tmp, sizeof(tmp),
261 "Rcv FCP: in %08x != out %08x\n",
262 atomic_read(&tgtp->rcv_fcp_cmd_in),
263 atomic_read(&tgtp->rcv_fcp_cmd_out));
264 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
265 goto buffer_done;
266 }
267
268 scnprintf(tmp, sizeof(tmp),
269 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
270 "drop %08x\n",
271 atomic_read(&tgtp->xmt_fcp_read),
272 atomic_read(&tgtp->xmt_fcp_read_rsp),
273 atomic_read(&tgtp->xmt_fcp_write),
274 atomic_read(&tgtp->xmt_fcp_rsp),
275 atomic_read(&tgtp->xmt_fcp_drop));
276 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
277 goto buffer_done;
278
279 scnprintf(tmp, sizeof(tmp),
280 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
281 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
282 atomic_read(&tgtp->xmt_fcp_rsp_error),
283 atomic_read(&tgtp->xmt_fcp_rsp_drop));
284 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
285 goto buffer_done;
286
287 scnprintf(tmp, sizeof(tmp),
288 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
289 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
290 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
291 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
292 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
293 goto buffer_done;
294
295 scnprintf(tmp, sizeof(tmp),
296 "ABORT: Xmt %08x Cmpl %08x\n",
297 atomic_read(&tgtp->xmt_fcp_abort),
298 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
299 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
300 goto buffer_done;
301
302 scnprintf(tmp, sizeof(tmp),
303 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
304 atomic_read(&tgtp->xmt_abort_sol),
305 atomic_read(&tgtp->xmt_abort_unsol),
306 atomic_read(&tgtp->xmt_abort_rsp),
307 atomic_read(&tgtp->xmt_abort_rsp_error));
308 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
309 goto buffer_done;
310
311 scnprintf(tmp, sizeof(tmp),
312 "DELAY: ctx %08x fod %08x wqfull %08x\n",
313 atomic_read(&tgtp->defer_ctx),
314 atomic_read(&tgtp->defer_fod),
315 atomic_read(&tgtp->defer_wqfull));
316 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
317 goto buffer_done;
318
319 /* Calculate outstanding IOs */
320 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
321 tot += atomic_read(&tgtp->xmt_fcp_release);
322 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
323
324 scnprintf(tmp, sizeof(tmp),
325 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
326 "CTX Outstanding %08llx\n\n",
327 phba->sli4_hba.nvmet_xri_cnt,
328 phba->sli4_hba.nvmet_io_wait_cnt,
329 phba->sli4_hba.nvmet_io_wait_total,
330 tot);
331 strlcat(buf, tmp, PAGE_SIZE);
332 goto buffer_done;
333 }
334
335 localport = vport->localport;
336 if (!localport) {
337 len = scnprintf(buf, PAGE_SIZE,
338 "NVME Initiator x%llx is not allocated\n",
339 wwn_to_u64(vport->fc_portname.u.wwn));
340 return len;
341 }
342 lport = (struct lpfc_nvme_lport *)localport->private;
343 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
344 goto buffer_done;
345
346 scnprintf(tmp, sizeof(tmp),
347 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
348 phba->brd_no,
349 phba->sli4_hba.max_cfg_param.max_xri,
350 phba->sli4_hba.io_xri_max,
351 lpfc_sli4_get_els_iocb_cnt(phba));
352 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
353 goto buffer_done;
354
355 /* Port state is only one of two values for now. */
356 if (localport->port_id)
357 statep = "ONLINE";
358 else
359 statep = "UNKNOWN ";
360
361 scnprintf(tmp, sizeof(tmp),
362 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
363 "NVME LPORT lpfc",
364 phba->brd_no,
365 wwn_to_u64(vport->fc_portname.u.wwn),
366 wwn_to_u64(vport->fc_nodename.u.wwn),
367 localport->port_id, statep);
368 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
369 goto buffer_done;
370
371 spin_lock_irq(shost->host_lock);
372
373 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
374 nrport = NULL;
375 spin_lock(&vport->phba->hbalock);
376 rport = lpfc_ndlp_get_nrport(ndlp);
377 if (rport)
378 nrport = rport->remoteport;
379 spin_unlock(&vport->phba->hbalock);
380 if (!nrport)
381 continue;
382
383 /* Port state is only one of two values for now. */
384 switch (nrport->port_state) {
385 case FC_OBJSTATE_ONLINE:
386 statep = "ONLINE";
387 break;
388 case FC_OBJSTATE_UNKNOWN:
389 statep = "UNKNOWN ";
390 break;
391 default:
392 statep = "UNSUPPORTED";
393 break;
394 }
395
396 /* Tab in to show lport ownership. */
397 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
398 goto unlock_buf_done;
399 if (phba->brd_no >= 10) {
400 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
401 goto unlock_buf_done;
402 }
403
404 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
405 nrport->port_name);
406 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
407 goto unlock_buf_done;
408
409 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
410 nrport->node_name);
411 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
412 goto unlock_buf_done;
413
414 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
415 nrport->port_id);
416 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
417 goto unlock_buf_done;
418
419 /* An NVME rport can have multiple roles. */
420 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
421 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
422 goto unlock_buf_done;
423 }
424 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
425 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
426 goto unlock_buf_done;
427 }
428 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
429 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
430 goto unlock_buf_done;
431 }
432 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
433 FC_PORT_ROLE_NVME_TARGET |
434 FC_PORT_ROLE_NVME_DISCOVERY)) {
435 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
436 nrport->port_role);
437 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
438 goto unlock_buf_done;
439 }
440
441 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
442 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
443 goto unlock_buf_done;
444 }
445 spin_unlock_irq(shost->host_lock);
446
447 if (!lport)
448 goto buffer_done;
449
450 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
451 goto buffer_done;
452
453 scnprintf(tmp, sizeof(tmp),
454 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
455 atomic_read(&lport->fc4NvmeLsRequests),
456 atomic_read(&lport->fc4NvmeLsCmpls),
457 atomic_read(&lport->xmt_ls_abort));
458 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
459 goto buffer_done;
460
461 scnprintf(tmp, sizeof(tmp),
462 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
463 atomic_read(&lport->xmt_ls_err),
464 atomic_read(&lport->cmpl_ls_xb),
465 atomic_read(&lport->cmpl_ls_err));
466 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
467 goto buffer_done;
468
469 totin = 0;
470 totout = 0;
471 for (i = 0; i < phba->cfg_hdw_queue; i++) {
472 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
473 tot = cstat->io_cmpls;
474 totin += tot;
475 data1 = cstat->input_requests;
476 data2 = cstat->output_requests;
477 data3 = cstat->control_requests;
478 totout += (data1 + data2 + data3);
479 }
480 scnprintf(tmp, sizeof(tmp),
481 "Total FCP Cmpl %016llx Issue %016llx "
482 "OutIO %016llx\n",
483 totin, totout, totout - totin);
484 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
485 goto buffer_done;
486
487 scnprintf(tmp, sizeof(tmp),
488 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
489 "wqerr %08x err %08x\n",
490 atomic_read(&lport->xmt_fcp_abort),
491 atomic_read(&lport->xmt_fcp_noxri),
492 atomic_read(&lport->xmt_fcp_bad_ndlp),
493 atomic_read(&lport->xmt_fcp_qdepth),
494 atomic_read(&lport->xmt_fcp_err),
495 atomic_read(&lport->xmt_fcp_wqerr));
496 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
497 goto buffer_done;
498
499 scnprintf(tmp, sizeof(tmp),
500 "FCP CMPL: xb %08x Err %08x\n",
501 atomic_read(&lport->cmpl_fcp_xb),
502 atomic_read(&lport->cmpl_fcp_err));
503 strlcat(buf, tmp, PAGE_SIZE);
504
505 /* host_lock is already unlocked. */
506 goto buffer_done;
507
508 unlock_buf_done:
509 spin_unlock_irq(shost->host_lock);
510
511 buffer_done:
512 len = strnlen(buf, PAGE_SIZE);
513
514 if (unlikely(len >= (PAGE_SIZE - 1))) {
515 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
516 "6314 Catching potential buffer "
517 "overflow > PAGE_SIZE = %lu bytes\n",
518 PAGE_SIZE);
519 strlcpy(buf + PAGE_SIZE - 1 -
520 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
521 LPFC_NVME_INFO_MORE_STR,
522 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
523 + 1);
524 }
525
526 return len;
527 }
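
/*
 * Editorial note: reading this attribute (for example via a hypothetical
 * "cat /sys/class/scsi_host/host0/nvme_info") returns the target or
 * initiator report assembled above; if the report would exceed PAGE_SIZE
 * it is truncated and terminated with LPFC_NVME_INFO_MORE_STR.
 */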
528
529 static ssize_t
530 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
531 char *buf)
532 {
533 struct Scsi_Host *shost = class_to_shost(dev);
534 struct lpfc_vport *vport = shost_priv(shost);
535 struct lpfc_hba *phba = vport->phba;
536 int len;
537 struct lpfc_fc4_ctrl_stat *cstat;
538 u64 data1, data2, data3;
539 u64 tot, totin, totout;
540 int i;
541 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
542
543 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
544 (phba->sli_rev != LPFC_SLI_REV4))
545 return 0;
546
547 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
548
549 totin = 0;
550 totout = 0;
551 for (i = 0; i < phba->cfg_hdw_queue; i++) {
552 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
553 tot = cstat->io_cmpls;
554 totin += tot;
555 data1 = cstat->input_requests;
556 data2 = cstat->output_requests;
557 data3 = cstat->control_requests;
558 totout += (data1 + data2 + data3);
559
560 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
561 "IO %016llx ", i, data1, data2, data3);
562 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
563 goto buffer_done;
564
565 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
566 tot, ((data1 + data2 + data3) - tot));
567 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
568 goto buffer_done;
569 }
570 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
571 "OutIO %016llx\n", totin, totout, totout - totin);
572 strlcat(buf, tmp, PAGE_SIZE);
573
574 buffer_done:
575 len = strnlen(buf, PAGE_SIZE);
576
577 return len;
578 }
579
580 static ssize_t
581 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
582 char *buf)
583 {
584 struct Scsi_Host *shost = class_to_shost(dev);
585 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
586 struct lpfc_hba *phba = vport->phba;
587
588 if (phba->cfg_enable_bg) {
589 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
590 return scnprintf(buf, PAGE_SIZE,
591 "BlockGuard Enabled\n");
592 else
593 return scnprintf(buf, PAGE_SIZE,
594 "BlockGuard Not Supported\n");
595 } else
596 return scnprintf(buf, PAGE_SIZE,
597 "BlockGuard Disabled\n");
598 }
599
600 static ssize_t
601 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
602 char *buf)
603 {
604 struct Scsi_Host *shost = class_to_shost(dev);
605 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
606 struct lpfc_hba *phba = vport->phba;
607
608 return scnprintf(buf, PAGE_SIZE, "%llu\n",
609 (unsigned long long)phba->bg_guard_err_cnt);
610 }
611
612 static ssize_t
613 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
614 char *buf)
615 {
616 struct Scsi_Host *shost = class_to_shost(dev);
617 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
618 struct lpfc_hba *phba = vport->phba;
619
620 return scnprintf(buf, PAGE_SIZE, "%llu\n",
621 (unsigned long long)phba->bg_apptag_err_cnt);
622 }
623
624 static ssize_t
625 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
626 char *buf)
627 {
628 struct Scsi_Host *shost = class_to_shost(dev);
629 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
630 struct lpfc_hba *phba = vport->phba;
631
632 return scnprintf(buf, PAGE_SIZE, "%llu\n",
633 (unsigned long long)phba->bg_reftag_err_cnt);
634 }
635
636 /**
637 * lpfc_info_show - Return some pci info about the host in ascii
638 * @dev: class converted to a Scsi_host structure.
639 * @attr: device attribute, not used.
640 * @buf: on return contains the formatted text from lpfc_info().
641 *
642 * Returns: size of formatted string.
643 **/
644 static ssize_t
645 lpfc_info_show(struct device *dev, struct device_attribute *attr,
646 char *buf)
647 {
648 struct Scsi_Host *host = class_to_shost(dev);
649
650 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
651 }
652
653 /**
654 * lpfc_serialnum_show - Return the hba serial number in ascii
655 * @dev: class converted to a Scsi_host structure.
656 * @attr: device attribute, not used.
657 * @buf: on return contains the formatted text serial number.
658 *
659 * Returns: size of formatted string.
660 **/
661 static ssize_t
662 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
663 char *buf)
664 {
665 struct Scsi_Host *shost = class_to_shost(dev);
666 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
667 struct lpfc_hba *phba = vport->phba;
668
669 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
670 }
671
672 /**
673 * lpfc_temp_sensor_show - Return the temperature sensor level
674 * @dev: class converted to a Scsi_host structure.
675 * @attr: device attribute, not used.
676 * @buf: on return contains the formatted support level.
677 *
678 * Description:
679 * Returns a number indicating the temperature sensor level currently
680 * supported, zero or one in ascii.
681 *
682 * Returns: size of formatted string.
683 **/
684 static ssize_t
685 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
686 char *buf)
687 {
688 struct Scsi_Host *shost = class_to_shost(dev);
689 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
690 struct lpfc_hba *phba = vport->phba;
691 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
692 }
693
694 /**
695 * lpfc_modeldesc_show - Return the model description of the hba
696 * @dev: class converted to a Scsi_host structure.
697 * @attr: device attribute, not used.
698 * @buf: on return contains the scsi vpd model description.
699 *
700 * Returns: size of formatted string.
701 **/
702 static ssize_t
703 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
704 char *buf)
705 {
706 struct Scsi_Host *shost = class_to_shost(dev);
707 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
708 struct lpfc_hba *phba = vport->phba;
709
710 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
711 }
712
713 /**
714 * lpfc_modelname_show - Return the model name of the hba
715 * @dev: class converted to a Scsi_host structure.
716 * @attr: device attribute, not used.
717 * @buf: on return contains the scsi vpd model name.
718 *
719 * Returns: size of formatted string.
720 **/
721 static ssize_t
722 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
723 char *buf)
724 {
725 struct Scsi_Host *shost = class_to_shost(dev);
726 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
727 struct lpfc_hba *phba = vport->phba;
728
729 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
730 }
731
732 /**
733 * lpfc_programtype_show - Return the program type of the hba
734 * @dev: class converted to a Scsi_host structure.
735 * @attr: device attribute, not used.
736 * @buf: on return contains the scsi vpd program type.
737 *
738 * Returns: size of formatted string.
739 **/
740 static ssize_t
741 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
742 char *buf)
743 {
744 struct Scsi_Host *shost = class_to_shost(dev);
745 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
746 struct lpfc_hba *phba = vport->phba;
747
748 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
749 }
750
751 /**
752 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
753 * @dev: class converted to a Scsi_host structure.
754 * @attr: device attribute, not used.
755 * @buf: on return contains the Menlo Maintenance sli flag.
756 *
757 * Returns: size of formatted string.
758 **/
759 static ssize_t
760 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
761 {
762 struct Scsi_Host *shost = class_to_shost(dev);
763 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
764 struct lpfc_hba *phba = vport->phba;
765
766 return scnprintf(buf, PAGE_SIZE, "%d\n",
767 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
768 }
769
770 /**
771 * lpfc_vportnum_show - Return the port number in ascii of the hba
772 * @dev: class converted to a Scsi_host structure.
773 * @attr: device attribute, not used.
774 * @buf: on return contains scsi vpd program type.
775 *
776 * Returns: size of formatted string.
777 **/
778 static ssize_t
779 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
780 char *buf)
781 {
782 struct Scsi_Host *shost = class_to_shost(dev);
783 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
784 struct lpfc_hba *phba = vport->phba;
785
786 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
787 }
788
789 /**
790 * lpfc_fwrev_show - Return the firmware rev running in the hba
791 * @dev: class converted to a Scsi_host structure.
792 * @attr: device attribute, not used.
793 * @buf: on return contains the scsi vpd program type.
794 *
795 * Returns: size of formatted string.
796 **/
797 static ssize_t
798 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
799 char *buf)
800 {
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
803 struct lpfc_hba *phba = vport->phba;
804 uint32_t if_type;
805 uint8_t sli_family;
806 char fwrev[FW_REV_STR_SIZE];
807 int len;
808
809 lpfc_decode_firmware_rev(phba, fwrev, 1);
810 if_type = phba->sli4_hba.pc_sli4_params.if_type;
811 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
812
813 if (phba->sli_rev < LPFC_SLI_REV4)
814 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
815 fwrev, phba->sli_rev);
816 else
817 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
818 fwrev, phba->sli_rev, if_type, sli_family);
819
820 return len;
821 }
822
823 /**
824 * lpfc_hdw_show - Return the jedec information about the hba
825 * @dev: class converted to a Scsi_host structure.
826 * @attr: device attribute, not used.
827 * @buf: on return contains the scsi vpd program type.
828 *
829 * Returns: size of formatted string.
830 **/
831 static ssize_t
832 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
833 {
834 char hdw[9];
835 struct Scsi_Host *shost = class_to_shost(dev);
836 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
837 struct lpfc_hba *phba = vport->phba;
838 lpfc_vpd_t *vp = &phba->vpd;
839
840 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
841 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
842 vp->rev.smRev, vp->rev.smFwRev);
843 }
844
845 /**
846 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
847 * @dev: class converted to a Scsi_host structure.
848 * @attr: device attribute, not used.
849 * @buf: on return contains the ROM and FCode ascii strings.
850 *
851 * Returns: size of formatted string.
852 **/
853 static ssize_t
854 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
855 char *buf)
856 {
857 struct Scsi_Host *shost = class_to_shost(dev);
858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
859 struct lpfc_hba *phba = vport->phba;
860 char fwrev[FW_REV_STR_SIZE];
861
862 if (phba->sli_rev < LPFC_SLI_REV4)
863 return scnprintf(buf, PAGE_SIZE, "%s\n",
864 phba->OptionROMVersion);
865
866 lpfc_decode_firmware_rev(phba, fwrev, 1);
867 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
868 }
869
870 /**
871  * lpfc_link_state_show - Return the link state of the port
872 * @dev: class converted to a Scsi_host structure.
873 * @attr: device attribute, not used.
874 * @buf: on return contains text describing the state of the link.
875 *
876 * Notes:
877 * The switch statement has no default so zero will be returned.
878 *
879 * Returns: size of formatted string.
880 **/
881 static ssize_t
882 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
883 char *buf)
884 {
885 struct Scsi_Host *shost = class_to_shost(dev);
886 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
887 struct lpfc_hba *phba = vport->phba;
888 int len = 0;
889
890 switch (phba->link_state) {
891 case LPFC_LINK_UNKNOWN:
892 case LPFC_WARM_START:
893 case LPFC_INIT_START:
894 case LPFC_INIT_MBX_CMDS:
895 case LPFC_LINK_DOWN:
896 case LPFC_HBA_ERROR:
897 if (phba->hba_flag & LINK_DISABLED)
898 len += scnprintf(buf + len, PAGE_SIZE-len,
899 "Link Down - User disabled\n");
900 else
901 len += scnprintf(buf + len, PAGE_SIZE-len,
902 "Link Down\n");
903 break;
904 case LPFC_LINK_UP:
905 case LPFC_CLEAR_LA:
906 case LPFC_HBA_READY:
907 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
908
909 switch (vport->port_state) {
910 case LPFC_LOCAL_CFG_LINK:
911 len += scnprintf(buf + len, PAGE_SIZE-len,
912 "Configuring Link\n");
913 break;
914 case LPFC_FDISC:
915 case LPFC_FLOGI:
916 case LPFC_FABRIC_CFG_LINK:
917 case LPFC_NS_REG:
918 case LPFC_NS_QRY:
919 case LPFC_BUILD_DISC_LIST:
920 case LPFC_DISC_AUTH:
921 len += scnprintf(buf + len, PAGE_SIZE - len,
922 "Discovery\n");
923 break;
924 case LPFC_VPORT_READY:
925 len += scnprintf(buf + len, PAGE_SIZE - len,
926 "Ready\n");
927 break;
928
929 case LPFC_VPORT_FAILED:
930 len += scnprintf(buf + len, PAGE_SIZE - len,
931 "Failed\n");
932 break;
933
934 case LPFC_VPORT_UNKNOWN:
935 len += scnprintf(buf + len, PAGE_SIZE - len,
936 "Unknown\n");
937 break;
938 }
939 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
940 len += scnprintf(buf + len, PAGE_SIZE-len,
941 " Menlo Maint Mode\n");
942 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
943 if (vport->fc_flag & FC_PUBLIC_LOOP)
944 len += scnprintf(buf + len, PAGE_SIZE-len,
945 " Public Loop\n");
946 else
947 len += scnprintf(buf + len, PAGE_SIZE-len,
948 " Private Loop\n");
949 } else {
950 if (vport->fc_flag & FC_FABRIC)
951 len += scnprintf(buf + len, PAGE_SIZE-len,
952 " Fabric\n");
953 else
954 len += scnprintf(buf + len, PAGE_SIZE-len,
955 " Point-2-Point\n");
956 }
957 }
958
959 if ((phba->sli_rev == LPFC_SLI_REV4) &&
960 ((bf_get(lpfc_sli_intf_if_type,
961 &phba->sli4_hba.sli_intf) ==
962 LPFC_SLI_INTF_IF_TYPE_6))) {
963 struct lpfc_trunk_link link = phba->trunk_link;
964
965 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
966 len += scnprintf(buf + len, PAGE_SIZE - len,
967 "Trunk port 0: Link %s %s\n",
968 (link.link0.state == LPFC_LINK_UP) ?
969 "Up" : "Down. ",
970 trunk_errmsg[link.link0.fault]);
971
972 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
973 len += scnprintf(buf + len, PAGE_SIZE - len,
974 "Trunk port 1: Link %s %s\n",
975 (link.link1.state == LPFC_LINK_UP) ?
976 "Up" : "Down. ",
977 trunk_errmsg[link.link1.fault]);
978
979 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
980 len += scnprintf(buf + len, PAGE_SIZE - len,
981 "Trunk port 2: Link %s %s\n",
982 (link.link2.state == LPFC_LINK_UP) ?
983 "Up" : "Down. ",
984 trunk_errmsg[link.link2.fault]);
985
986 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
987 len += scnprintf(buf + len, PAGE_SIZE - len,
988 "Trunk port 3: Link %s %s\n",
989 (link.link3.state == LPFC_LINK_UP) ?
990 "Up" : "Down. ",
991 trunk_errmsg[link.link3.fault]);
992
993 }
994
995 return len;
996 }
997
998 /**
999 * lpfc_sli4_protocol_show - Return the fip mode of the HBA
1000 * @dev: class unused variable.
1001 * @attr: device attribute, not used.
1002 * @buf: on return contains the module description text.
1003 *
1004 * Returns: size of formatted string.
1005 **/
1006 static ssize_t
1007 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1008 char *buf)
1009 {
1010 struct Scsi_Host *shost = class_to_shost(dev);
1011 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1012 struct lpfc_hba *phba = vport->phba;
1013
1014 if (phba->sli_rev < LPFC_SLI_REV4)
1015 return scnprintf(buf, PAGE_SIZE, "fc\n");
1016
1017 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1018 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1019 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1020 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1021 return scnprintf(buf, PAGE_SIZE, "fc\n");
1022 }
1023 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1024 }
1025
1026 /**
1027 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1028 * (OAS) is supported.
1029 * @dev: class unused variable.
1030 * @attr: device attribute, not used.
1031 * @buf: on return contains the module description text.
1032 *
1033 * Returns: size of formatted string.
1034 **/
1035 static ssize_t
1036 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1037 char *buf)
1038 {
1039 struct Scsi_Host *shost = class_to_shost(dev);
1040 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1041 struct lpfc_hba *phba = vport->phba;
1042
1043 return scnprintf(buf, PAGE_SIZE, "%d\n",
1044 phba->sli4_hba.pc_sli4_params.oas_supported);
1045 }
1046
1047 /**
1048 * lpfc_link_state_store - Transition the link_state on an HBA port
1049 * @dev: class device that is converted into a Scsi_host.
1050 * @attr: device attribute, not used.
1051 * @buf: one or more lpfc_polling_flags values.
1052 * @count: not used.
1053 *
1054 * Returns:
1055 * -EINVAL if the buffer is not "up" or "down"
1056 * return from link state change function if non-zero
1057 * length of the buf on success
1058 **/
1059 static ssize_t
1060 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1061 const char *buf, size_t count)
1062 {
1063 struct Scsi_Host *shost = class_to_shost(dev);
1064 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1065 struct lpfc_hba *phba = vport->phba;
1066
1067 int status = -EINVAL;
1068
1069 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1070 (phba->link_state == LPFC_LINK_DOWN))
1071 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1072 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1073 (phba->link_state >= LPFC_LINK_UP))
1074 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1075
1076 if (status == 0)
1077 return strlen(buf);
1078 else
1079 return status;
1080 }
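
/*
 * Illustrative usage (a sketch; the sysfs path and host number are
 * assumptions, the "up"/"down" tokens come from the strncmp() checks
 * above):
 *
 *   echo up   > /sys/class/scsi_host/host0/lpfc_link_state
 *   echo down > /sys/class/scsi_host/host0/lpfc_link_state
 *
 * "up" is honored only while the link is LPFC_LINK_DOWN and "down" only
 * while the link is at least LPFC_LINK_UP; anything else returns -EINVAL.
 */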
1081
1082 /**
1083  * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped ports
1084 * @dev: class device that is converted into a Scsi_host.
1085 * @attr: device attribute, not used.
1086 * @buf: on return contains the sum of fc mapped and unmapped.
1087 *
1088 * Description:
1089  * Returns the ascii text number of the sum of the fc mapped and unmapped
1090  * port counts discovered on this vport.
1091 *
1092 * Returns: size of formatted string.
1093 **/
1094 static ssize_t
1095 lpfc_num_discovered_ports_show(struct device *dev,
1096 struct device_attribute *attr, char *buf)
1097 {
1098 struct Scsi_Host *shost = class_to_shost(dev);
1099 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1100
1101 return scnprintf(buf, PAGE_SIZE, "%d\n",
1102 vport->fc_map_cnt + vport->fc_unmap_cnt);
1103 }
1104
1105 /**
1106 * lpfc_issue_lip - Misnomer, name carried over from long ago
1107 * @shost: Scsi_Host pointer.
1108 *
1109 * Description:
1110 * Bring the link down gracefully then re-init the link. The firmware will
1111  * re-init the Fibre Channel interface as required. Does not issue a LIP.
1112 *
1113 * Returns:
1114 * -EPERM port offline or management commands are being blocked
1115 * -ENOMEM cannot allocate memory for the mailbox command
1116 * -EIO error sending the mailbox command
1117 * zero for success
1118 **/
1119 static int
1120 lpfc_issue_lip(struct Scsi_Host *shost)
1121 {
1122 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1123 struct lpfc_hba *phba = vport->phba;
1124 LPFC_MBOXQ_t *pmboxq;
1125 int mbxstatus = MBXERR_ERROR;
1126
1127 /*
1128 * If the link is offline, disabled or BLOCK_MGMT_IO
1129 * it doesn't make any sense to allow issue_lip
1130 */
1131 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1132 (phba->hba_flag & LINK_DISABLED) ||
1133 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1134 return -EPERM;
1135
1136 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1137
1138 if (!pmboxq)
1139 return -ENOMEM;
1140
1141 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1142 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1143 pmboxq->u.mb.mbxOwner = OWN_HOST;
1144
1145 if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
1146 vport->fc_flag &= ~FC_PT2PT_NO_NVME;
1147
1148 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1149
1150 if ((mbxstatus == MBX_SUCCESS) &&
1151 (pmboxq->u.mb.mbxStatus == 0 ||
1152 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1153 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1154 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1155 phba->cfg_link_speed);
1156 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1157 phba->fc_ratov * 2);
1158 if ((mbxstatus == MBX_SUCCESS) &&
1159 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1160 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1161 "2859 SLI authentication is required "
1162 "for INIT_LINK but has not done yet\n");
1163 }
1164
1165 lpfc_set_loopback_flag(phba);
1166 if (mbxstatus != MBX_TIMEOUT)
1167 mempool_free(pmboxq, phba->mbox_mem_pool);
1168
1169 if (mbxstatus == MBXERR_ERROR)
1170 return -EIO;
1171
1172 return 0;
1173 }
1174
1175 int
1176 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1177 {
1178 int cnt = 0;
1179
1180 spin_lock_irq(lock);
1181 while (!list_empty(q)) {
1182 spin_unlock_irq(lock);
1183 msleep(20);
1184 if (cnt++ > 250) { /* 5 secs */
1185 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1186 "0466 %s %s\n",
1187 "Outstanding IO when ",
1188 "bringing Adapter offline\n");
1189 return 0;
1190 }
1191 spin_lock_irq(lock);
1192 }
1193 spin_unlock_irq(lock);
1194 return 1;
1195 }
1196
1197 /**
1198 * lpfc_do_offline - Issues a mailbox command to bring the link down
1199 * @phba: lpfc_hba pointer.
1200 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1201 *
1202 * Notes:
1203 * Assumes any error from lpfc_do_offline() will be negative.
1204 * Can wait up to 5 seconds for the port ring buffers count
1205 * to reach zero, prints a warning if it is not zero and continues.
1206 * lpfc_workq_post_event() returns a non-zero return code if call fails.
1207 *
1208 * Returns:
1209 * -EIO error posting the event
1210 * zero for success
1211 **/
1212 static int
1213 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1214 {
1215 struct completion online_compl;
1216 struct lpfc_queue *qp = NULL;
1217 struct lpfc_sli_ring *pring;
1218 struct lpfc_sli *psli;
1219 int status = 0;
1220 int i;
1221 int rc;
1222
1223 init_completion(&online_compl);
1224 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1225 LPFC_EVT_OFFLINE_PREP);
1226 if (rc == 0)
1227 return -ENOMEM;
1228
1229 wait_for_completion(&online_compl);
1230
1231 if (status != 0)
1232 return -EIO;
1233
1234 psli = &phba->sli;
1235
1236 /*
1237 	 * If freeing of the queues has already started, don't access them.
1238 * Otherwise set FREE_WAIT to indicate that queues are being used
1239 * to hold the freeing process until we finish.
1240 */
1241 spin_lock_irq(&phba->hbalock);
1242 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1243 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1244 } else {
1245 spin_unlock_irq(&phba->hbalock);
1246 goto skip_wait;
1247 }
1248 spin_unlock_irq(&phba->hbalock);
1249
1250 /* Wait a little for things to settle down, but not
1251 * long enough for dev loss timeout to expire.
1252 */
1253 if (phba->sli_rev != LPFC_SLI_REV4) {
1254 for (i = 0; i < psli->num_rings; i++) {
1255 pring = &psli->sli3_ring[i];
1256 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1257 &phba->hbalock))
1258 goto out;
1259 }
1260 } else {
1261 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1262 pring = qp->pring;
1263 if (!pring)
1264 continue;
1265 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1266 &pring->ring_lock))
1267 goto out;
1268 }
1269 }
1270 out:
1271 spin_lock_irq(&phba->hbalock);
1272 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1273 spin_unlock_irq(&phba->hbalock);
1274
1275 skip_wait:
1276 init_completion(&online_compl);
1277 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1278 if (rc == 0)
1279 return -ENOMEM;
1280
1281 wait_for_completion(&online_compl);
1282
1283 if (status != 0)
1284 return -EIO;
1285
1286 return 0;
1287 }
1288
1289 /**
1290 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1291 * @phba: lpfc_hba pointer.
1292 *
1293 * Description:
1294 * Issues a PCI secondary bus reset for the phba->pcidev.
1295 *
1296 * Notes:
1297 * First walks the bus_list to ensure only PCI devices with Emulex
1298 * vendor id, device ids that support hot reset, only one occurrence
1299 * of function 0, and all ports on the bus are in offline mode to ensure the
1300 * hot reset only affects one valid HBA.
1301 *
1302 * Returns:
1303 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1304 * -ENODEV, NULL ptr to pcidev
1305 * -EBADSLT, detected invalid device
1306 * -EBUSY, port is not in offline state
1307 * 0, successful
1308 */
1309 static int
1310 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1311 {
1312 struct pci_dev *pdev = phba->pcidev;
1313 struct Scsi_Host *shost = NULL;
1314 struct lpfc_hba *phba_other = NULL;
1315 struct pci_dev *ptr = NULL;
1316 int res;
1317
1318 if (phba->cfg_enable_hba_reset != 2)
1319 return -ENOTSUPP;
1320
1321 if (!pdev) {
1322 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1323 return -ENODEV;
1324 }
1325
1326 res = lpfc_check_pci_resettable(phba);
1327 if (res)
1328 return res;
1329
1330 /* Walk the list of devices on the pci_dev's bus */
1331 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1332 /* Check port is offline */
1333 shost = pci_get_drvdata(ptr);
1334 if (shost) {
1335 phba_other =
1336 ((struct lpfc_vport *)shost->hostdata)->phba;
1337 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1338 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1339 "8349 WWPN = 0x%02x%02x%02x%02x"
1340 "%02x%02x%02x%02x is not "
1341 "offline!\n",
1342 phba_other->wwpn[0],
1343 phba_other->wwpn[1],
1344 phba_other->wwpn[2],
1345 phba_other->wwpn[3],
1346 phba_other->wwpn[4],
1347 phba_other->wwpn[5],
1348 phba_other->wwpn[6],
1349 phba_other->wwpn[7]);
1350 return -EBUSY;
1351 }
1352 }
1353 }
1354
1355 /* Issue PCI bus reset */
1356 res = pci_reset_bus(pdev);
1357 if (res) {
1358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1359 "8350 PCI reset bus failed: %d\n", res);
1360 }
1361
1362 return res;
1363 }
1364
1365 /**
1366 * lpfc_selective_reset - Offline then onlines the port
1367 * @phba: lpfc_hba pointer.
1368 *
1369 * Description:
1370 * If the port is configured to allow a reset then the hba is brought
1371 * offline then online.
1372 *
1373 * Notes:
1374 * Assumes any error from lpfc_do_offline() will be negative.
1375 * Do not make this function static.
1376 *
1377 * Returns:
1378 * lpfc_do_offline() return code if not zero
1379 * -EIO reset not configured or error posting the event
1380 * zero for success
1381 **/
1382 int
1383 lpfc_selective_reset(struct lpfc_hba *phba)
1384 {
1385 struct completion online_compl;
1386 int status = 0;
1387 int rc;
1388
1389 if (!phba->cfg_enable_hba_reset)
1390 return -EACCES;
1391
1392 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1393 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1394
1395 if (status != 0)
1396 return status;
1397 }
1398
1399 init_completion(&online_compl);
1400 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1401 LPFC_EVT_ONLINE);
1402 if (rc == 0)
1403 return -ENOMEM;
1404
1405 wait_for_completion(&online_compl);
1406
1407 if (status != 0)
1408 return -EIO;
1409
1410 return 0;
1411 }
1412
1413 /**
1414 * lpfc_issue_reset - Selectively resets an adapter
1415 * @dev: class device that is converted into a Scsi_host.
1416 * @attr: device attribute, not used.
1417 * @buf: containing the string "selective".
1418 * @count: unused variable.
1419 *
1420 * Description:
1421 * If the buf contains the string "selective" then lpfc_selective_reset()
1422 * is called to perform the reset.
1423 *
1424 * Notes:
1425 * Assumes any error from lpfc_selective_reset() will be negative.
1426 * If lpfc_selective_reset() returns zero then the length of the buffer
1427 * is returned which indicates success
1428 *
1429 * Returns:
1430 * -EINVAL if the buffer does not contain the string "selective"
1431  * length of buf if the lpfc_selective_reset() call succeeds
1432 * return value of lpfc_selective_reset() if the call fails
1433 **/
1434 static ssize_t
1435 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1436 const char *buf, size_t count)
1437 {
1438 struct Scsi_Host *shost = class_to_shost(dev);
1439 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1440 struct lpfc_hba *phba = vport->phba;
1441 int status = -EINVAL;
1442
1443 if (!phba->cfg_enable_hba_reset)
1444 return -EACCES;
1445
1446 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1447 status = phba->lpfc_selective_reset(phba);
1448
1449 if (status == 0)
1450 return strlen(buf);
1451 else
1452 return status;
1453 }
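
/*
 * Illustrative usage (a sketch; the sysfs path and host number are
 * assumptions, the "selective" token comes from the strncmp() check
 * above):
 *
 *   echo selective > /sys/class/scsi_host/host0/lpfc_issue_reset
 *
 * Any other string returns -EINVAL, and the write is rejected with
 * -EACCES when cfg_enable_hba_reset is zero.
 */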
1454
1455 /**
1456  * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
1457 * @phba: lpfc_hba pointer.
1458 *
1459 * Description:
1460  * SLI4 interface type-2 device: wait on the sliport status register for
1461  * readiness after performing a firmware reset.
1462 *
1463 * Returns:
1464  * zero for success, -EPERM when the port does not have privilege to perform
1465  * the reset, -EIO when the port times out while recovering from the reset.
1466 *
1467 * Note:
1468  * As the caller will interpret the return code by value, be careful when
1469  * making changes or additions to the return codes.
1470 **/
1471 int
1472 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1473 {
1474 struct lpfc_register portstat_reg = {0};
1475 int i;
1476
1477 msleep(100);
1478 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1479 &portstat_reg.word0))
1480 return -EIO;
1481
1482 /* verify if privileged for the request operation */
1483 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1484 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1485 return -EPERM;
1486
1487 /* wait for the SLI port firmware ready after firmware reset */
1488 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1489 msleep(10);
1490 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1491 &portstat_reg.word0))
1492 continue;
1493 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1494 continue;
1495 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1496 continue;
1497 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1498 continue;
1499 break;
1500 }
1501
1502 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1503 return 0;
1504 else
1505 return -EIO;
1506 }
1507
1508 /**
1509 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1510  * @phba: lpfc_hba pointer.
1511  * @opcode: operation to request: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET.
1512 * Description:
1513 * Request SLI4 interface type-2 device to perform a physical register set
1514 * access.
1515 *
1516 * Returns:
1517  * zero for success, otherwise a negative error code
1518 **/
1519 static ssize_t
1520 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1521 {
1522 struct completion online_compl;
1523 struct pci_dev *pdev = phba->pcidev;
1524 uint32_t before_fc_flag;
1525 uint32_t sriov_nr_virtfn;
1526 uint32_t reg_val;
1527 int status = 0, rc = 0;
1528 int job_posted = 1, sriov_err;
1529
1530 if (!phba->cfg_enable_hba_reset)
1531 return -EACCES;
1532
1533 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1534 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1535 LPFC_SLI_INTF_IF_TYPE_2))
1536 return -EPERM;
1537
1538 /* Keep state if we need to restore back */
1539 before_fc_flag = phba->pport->fc_flag;
1540 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1541
1542 if (opcode == LPFC_FW_DUMP) {
1543 init_completion(&online_compl);
1544 phba->fw_dump_cmpl = &online_compl;
1545 } else {
1546 /* Disable SR-IOV virtual functions if enabled */
1547 if (phba->cfg_sriov_nr_virtfn) {
1548 pci_disable_sriov(pdev);
1549 phba->cfg_sriov_nr_virtfn = 0;
1550 }
1551
1552 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1553
1554 if (status != 0)
1555 return status;
1556
1557 /* wait for the device to be quiesced before firmware reset */
1558 msleep(100);
1559 }
1560
1561 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1562 LPFC_CTL_PDEV_CTL_OFFSET);
1563
1564 if (opcode == LPFC_FW_DUMP)
1565 reg_val |= LPFC_FW_DUMP_REQUEST;
1566 else if (opcode == LPFC_FW_RESET)
1567 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1568 else if (opcode == LPFC_DV_RESET)
1569 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1570
1571 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1572 LPFC_CTL_PDEV_CTL_OFFSET);
1573 /* flush */
1574 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1575
1576 /* delay driver action following IF_TYPE_2 reset */
1577 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1578
1579 if (rc == -EPERM) {
1580 /* no privilege for reset */
1581 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1582 "3150 No privilege to perform the requested "
1583 "access: x%x\n", reg_val);
1584 } else if (rc == -EIO) {
1585 /* reset failed, there is nothing more we can do */
1586 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1587 "3153 Fail to perform the requested "
1588 "access: x%x\n", reg_val);
1589 if (phba->fw_dump_cmpl)
1590 phba->fw_dump_cmpl = NULL;
1591 return rc;
1592 }
1593
1594 /* keep the original port state */
1595 if (before_fc_flag & FC_OFFLINE_MODE) {
1596 if (phba->fw_dump_cmpl)
1597 phba->fw_dump_cmpl = NULL;
1598 goto out;
1599 }
1600
1601 /* Firmware dump will trigger an HA_ERATT event, and
1602 * lpfc_handle_eratt_s4 routine already handles bringing the port back
1603 * online.
1604 */
1605 if (opcode == LPFC_FW_DUMP) {
1606 wait_for_completion(phba->fw_dump_cmpl);
1607 } else {
1608 init_completion(&online_compl);
1609 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1610 LPFC_EVT_ONLINE);
1611 if (!job_posted)
1612 goto out;
1613
1614 wait_for_completion(&online_compl);
1615 }
1616 out:
1617 /* in any case, restore the virtual functions enabled as before */
1618 if (sriov_nr_virtfn) {
1619 /* If fw_dump was performed, first disable to clean up */
1620 if (opcode == LPFC_FW_DUMP) {
1621 pci_disable_sriov(pdev);
1622 phba->cfg_sriov_nr_virtfn = 0;
1623 }
1624
1625 sriov_err =
1626 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1627 if (!sriov_err)
1628 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1629 }
1630
1631 /* return proper error code */
1632 if (!rc) {
1633 if (!job_posted)
1634 rc = -ENOMEM;
1635 else if (status)
1636 rc = -EIO;
1637 }
1638 return rc;
1639 }
1640
1641 /**
1642 * lpfc_nport_evt_cnt_show - Return the number of nport events
1643 * @dev: class device that is converted into a Scsi_host.
1644 * @attr: device attribute, not used.
1645 * @buf: on return contains the ascii number of nport events.
1646 *
1647 * Returns: size of formatted string.
1648 **/
1649 static ssize_t
1650 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1651 char *buf)
1652 {
1653 struct Scsi_Host *shost = class_to_shost(dev);
1654 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1655 struct lpfc_hba *phba = vport->phba;
1656
1657 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1658 }
1659
1660 static int
1661 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1662 {
1663 LPFC_MBOXQ_t *mbox = NULL;
1664 unsigned long val = 0;
1665 char *pval = NULL;
1666 int rc = 0;
1667
1668 if (!strncmp("enable", buff_out,
1669 strlen("enable"))) {
1670 pval = buff_out + strlen("enable") + 1;
1671 rc = kstrtoul(pval, 0, &val);
1672 if (rc)
1673 return rc; /* Invalid number */
1674 } else if (!strncmp("disable", buff_out,
1675 strlen("disable"))) {
1676 val = 0;
1677 } else {
1678 return -EINVAL; /* Invalid command */
1679 }
1680
1681 switch (val) {
1682 case 0:
1683 val = 0x0; /* Disable */
1684 break;
1685 case 2:
1686 val = 0x1; /* Enable two port trunk */
1687 break;
1688 case 4:
1689 val = 0x2; /* Enable four port trunk */
1690 break;
1691 default:
1692 return -EINVAL;
1693 }
1694
1695 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1696 "0070 Set trunk mode with val %ld ", val);
1697
1698 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1699 if (!mbox)
1700 return -ENOMEM;
1701
1702 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1703 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1704 12, LPFC_SLI4_MBX_EMBED);
1705
1706 bf_set(lpfc_mbx_set_trunk_mode,
1707 &mbox->u.mqe.un.set_trunk_mode,
1708 val);
1709 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1710 if (rc)
1711 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1712 "0071 Set trunk mode failed with status: %d",
1713 rc);
1714 mempool_free(mbox, phba->mbox_mem_pool);
1715
1716 return 0;
1717 }
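
/*
 * Editorial note on the inputs parsed above: buff_out is the text that
 * follows the "trunk" keyword written to the board_mode attribute, e.g.
 *
 *   "enable 2"  selects a two port trunk  (val 0x1)
 *   "enable 4"  selects a four port trunk (val 0x2)
 *   "disable"   turns trunking off        (val 0x0)
 *
 * Any count other than 0, 2 or 4 after "enable" is rejected with -EINVAL.
 */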
1718
1719 /**
1720 * lpfc_board_mode_show - Return the state of the board
1721 * @dev: class device that is converted into a Scsi_host.
1722 * @attr: device attribute, not used.
1723 * @buf: on return contains the state of the adapter.
1724 *
1725 * Returns: size of formatted string.
1726 **/
1727 static ssize_t
1728 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1729 char *buf)
1730 {
1731 struct Scsi_Host *shost = class_to_shost(dev);
1732 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1733 struct lpfc_hba *phba = vport->phba;
1734 	char *state;
1735
1736 if (phba->link_state == LPFC_HBA_ERROR)
1737 state = "error";
1738 else if (phba->link_state == LPFC_WARM_START)
1739 state = "warm start";
1740 else if (phba->link_state == LPFC_INIT_START)
1741 state = "offline";
1742 else
1743 state = "online";
1744
1745 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1746 }
1747
1748 /**
1749 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1750 * @dev: class device that is converted into a Scsi_host.
1751 * @attr: device attribute, not used.
1752  * @buf: containing one of the strings "online", "offline", "warm", "error",
 * "dump", "fw_reset", "dv_reset", "pci_bus_reset", or "trunk <mode>".
1753 * @count: unused variable.
1754 *
1755 * Returns:
1756  * -EACCES if lpfc_enable_hba_reset is not set
1757 * -EINVAL if the buffer does not contain a valid string (see above)
1758 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1759 * buf length greater than zero indicates success
1760 **/
1761 static ssize_t
1762 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1763 const char *buf, size_t count)
1764 {
1765 struct Scsi_Host *shost = class_to_shost(dev);
1766 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1767 struct lpfc_hba *phba = vport->phba;
1768 struct completion online_compl;
1769 char *board_mode_str = NULL;
1770 int status = 0;
1771 int rc;
1772
1773 if (!phba->cfg_enable_hba_reset) {
1774 status = -EACCES;
1775 goto board_mode_out;
1776 }
1777
1778 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1779 "3050 lpfc_board_mode set to %s\n", buf);
1780
1781 init_completion(&online_compl);
1782
1783 	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1784 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1785 LPFC_EVT_ONLINE);
1786 if (rc == 0) {
1787 status = -ENOMEM;
1788 goto board_mode_out;
1789 }
1790 wait_for_completion(&online_compl);
1791 if (status)
1792 status = -EIO;
1793 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1794 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1795 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1796 if (phba->sli_rev == LPFC_SLI_REV4)
1797 status = -EINVAL;
1798 else
1799 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1800 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1801 if (phba->sli_rev == LPFC_SLI_REV4)
1802 status = -EINVAL;
1803 else
1804 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1805 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1806 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1807 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1808 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1809 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1810 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1811 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1812 == 0)
1813 status = lpfc_reset_pci_bus(phba);
1814 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1815 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1816 else
1817 status = -EINVAL;
1818
1819 board_mode_out:
1820 if (!status)
1821 return strlen(buf);
1822 else {
1823 board_mode_str = strchr(buf, '\n');
1824 if (board_mode_str)
1825 *board_mode_str = '\0';
1826 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1827 "3097 Failed \"%s\", status(%d), "
1828 "fc_flag(x%x)\n",
1829 buf, status, phba->pport->fc_flag);
1830 return status;
1831 }
1832 }
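/*
 * Illustrative usage (sysfs path is an assumption): taking the adapter
 * offline and back online through this attribute would typically be
 *
 *   echo offline > /sys/class/scsi_host/host0/board_mode
 *   echo online  > /sys/class/scsi_host/host0/board_mode
 *
 * Every write requires lpfc_enable_hba_reset to be set (otherwise -EACCES),
 * and "warm"/"error" are rejected with -EINVAL on SLI-4 adapters.
 */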
1833
1834 /**
1835  * lpfc_get_hba_info - Return various bits of information about the adapter
1836 * @phba: pointer to the adapter structure.
1837 * @mxri: max xri count.
1838 * @axri: available xri count.
1839 * @mrpi: max rpi count.
1840 * @arpi: available rpi count.
1841 * @mvpi: max vpi count.
1842 * @avpi: available vpi count.
1843 *
1844 * Description:
1845  * If an integer pointer for a count is not null, then the value for that
1846  * count is returned.
1847 *
1848 * Returns:
1849 * zero on error
1850 * one for success
1851 **/
1852 static int
1853 lpfc_get_hba_info(struct lpfc_hba *phba,
1854 uint32_t *mxri, uint32_t *axri,
1855 uint32_t *mrpi, uint32_t *arpi,
1856 uint32_t *mvpi, uint32_t *avpi)
1857 {
1858 struct lpfc_mbx_read_config *rd_config;
1859 LPFC_MBOXQ_t *pmboxq;
1860 MAILBOX_t *pmb;
1861 int rc = 0;
1862 uint32_t max_vpi;
1863
1864 /*
1865 * prevent udev from issuing mailbox commands until the port is
1866 * configured.
1867 */
1868 if (phba->link_state < LPFC_LINK_DOWN ||
1869 !phba->mbox_mem_pool ||
1870 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1871 return 0;
1872
1873 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1874 return 0;
1875
1876 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1877 if (!pmboxq)
1878 return 0;
1879 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1880
1881 pmb = &pmboxq->u.mb;
1882 pmb->mbxCommand = MBX_READ_CONFIG;
1883 pmb->mbxOwner = OWN_HOST;
1884 pmboxq->ctx_buf = NULL;
1885
1886 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1887 rc = MBX_NOT_FINISHED;
1888 else
1889 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1890
1891 if (rc != MBX_SUCCESS) {
1892 if (rc != MBX_TIMEOUT)
1893 mempool_free(pmboxq, phba->mbox_mem_pool);
1894 return 0;
1895 }
1896
1897 if (phba->sli_rev == LPFC_SLI_REV4) {
1898 rd_config = &pmboxq->u.mqe.un.rd_config;
1899 if (mrpi)
1900 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1901 if (arpi)
1902 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1903 phba->sli4_hba.max_cfg_param.rpi_used;
1904 if (mxri)
1905 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1906 if (axri)
1907 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1908 phba->sli4_hba.max_cfg_param.xri_used;
1909
1910 /* Account for differences with SLI-3. Get vpi count from
1911 * mailbox data and subtract one for max vpi value.
1912 */
1913 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1914 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1915
1916 /* Limit the max we support */
1917 if (max_vpi > LPFC_MAX_VPI)
1918 max_vpi = LPFC_MAX_VPI;
1919 if (mvpi)
1920 *mvpi = max_vpi;
1921 if (avpi)
1922 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1923 } else {
1924 if (mrpi)
1925 *mrpi = pmb->un.varRdConfig.max_rpi;
1926 if (arpi)
1927 *arpi = pmb->un.varRdConfig.avail_rpi;
1928 if (mxri)
1929 *mxri = pmb->un.varRdConfig.max_xri;
1930 if (axri)
1931 *axri = pmb->un.varRdConfig.avail_xri;
1932 if (mvpi)
1933 *mvpi = pmb->un.varRdConfig.max_vpi;
1934 if (avpi) {
1935 /* avail_vpi is only valid if link is up and ready */
1936 if (phba->link_state == LPFC_HBA_READY)
1937 *avpi = pmb->un.varRdConfig.avail_vpi;
1938 else
1939 *avpi = pmb->un.varRdConfig.max_vpi;
1940 }
1941 }
1942
1943 mempool_free(pmboxq, phba->mbox_mem_pool);
1944 return 1;
1945 }
1946
1947 /**
1948 * lpfc_max_rpi_show - Return maximum rpi
1949 * @dev: class device that is converted into a Scsi_host.
1950 * @attr: device attribute, not used.
1951 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
1952 *
1953 * Description:
1954 * Calls lpfc_get_hba_info() asking for just the mrpi count.
1955 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1956 * to "Unknown" and the buffer length is returned, therefore the caller
1957 * must check for "Unknown" in the buffer to detect a failure.
1958 *
1959 * Returns: size of formatted string.
1960 **/
1961 static ssize_t
1962 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1963 char *buf)
1964 {
1965 struct Scsi_Host *shost = class_to_shost(dev);
1966 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1967 struct lpfc_hba *phba = vport->phba;
1968 uint32_t cnt;
1969
1970 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1971 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1972 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1973 }
1974
1975 /**
1976 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
1977 * @dev: class device that is converted into a Scsi_host.
1978 * @attr: device attribute, not used.
1979 * @buf: containing the used rpi count in decimal or "Unknown".
1980 *
1981 * Description:
1982 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
1983 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1984 * to "Unknown" and the buffer length is returned, therefore the caller
1985 * must check for "Unknown" in the buffer to detect a failure.
1986 *
1987 * Returns: size of formatted string.
1988 **/
1989 static ssize_t
1990 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1991 char *buf)
1992 {
1993 struct Scsi_Host *shost = class_to_shost(dev);
1994 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1995 struct lpfc_hba *phba = vport->phba;
1996 uint32_t cnt, acnt;
1997
1998 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1999 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2000 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2001 }
2002
2003 /**
2004 * lpfc_max_xri_show - Return maximum xri
2005 * @dev: class device that is converted into a Scsi_host.
2006 * @attr: device attribute, not used.
2007 * @buf: on return contains the maximum xri count in decimal or "Unknown".
2008 *
2009 * Description:
2010  * Calls lpfc_get_hba_info() asking for just the mxri count.
2011 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2012 * to "Unknown" and the buffer length is returned, therefore the caller
2013 * must check for "Unknown" in the buffer to detect a failure.
2014 *
2015 * Returns: size of formatted string.
2016 **/
2017 static ssize_t
2018 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
2019 char *buf)
2020 {
2021 struct Scsi_Host *shost = class_to_shost(dev);
2022 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2023 struct lpfc_hba *phba = vport->phba;
2024 uint32_t cnt;
2025
2026 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2027 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2028 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2029 }
2030
2031 /**
2032  * lpfc_used_xri_show - Return maximum xri minus the available xri
2033 * @dev: class device that is converted into a Scsi_host.
2034 * @attr: device attribute, not used.
2035 * @buf: on return contains the used xri count in decimal or "Unknown".
2036 *
2037 * Description:
2038 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2039 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2040 * to "Unknown" and the buffer length is returned, therefore the caller
2041 * must check for "Unknown" in the buffer to detect a failure.
2042 *
2043 * Returns: size of formatted string.
2044 **/
2045 static ssize_t
2046 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2047 char *buf)
2048 {
2049 struct Scsi_Host *shost = class_to_shost(dev);
2050 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2051 struct lpfc_hba *phba = vport->phba;
2052 uint32_t cnt, acnt;
2053
2054 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2055 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2056 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2057 }
2058
2059 /**
2060 * lpfc_max_vpi_show - Return maximum vpi
2061 * @dev: class device that is converted into a Scsi_host.
2062 * @attr: device attribute, not used.
2063 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2064 *
2065 * Description:
2066 * Calls lpfc_get_hba_info() asking for just the mvpi count.
2067 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2068 * to "Unknown" and the buffer length is returned, therefore the caller
2069 * must check for "Unknown" in the buffer to detect a failure.
2070 *
2071 * Returns: size of formatted string.
2072 **/
2073 static ssize_t
2074 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2075 char *buf)
2076 {
2077 struct Scsi_Host *shost = class_to_shost(dev);
2078 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2079 struct lpfc_hba *phba = vport->phba;
2080 uint32_t cnt;
2081
2082 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2083 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2084 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2085 }
2086
2087 /**
2088 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2089 * @dev: class device that is converted into a Scsi_host.
2090 * @attr: device attribute, not used.
2091 * @buf: on return contains the used vpi count in decimal or "Unknown".
2092 *
2093 * Description:
2094 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2095 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2096 * to "Unknown" and the buffer length is returned, therefore the caller
2097 * must check for "Unknown" in the buffer to detect a failure.
2098 *
2099 * Returns: size of formatted string.
2100 **/
2101 static ssize_t
2102 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2103 char *buf)
2104 {
2105 struct Scsi_Host *shost = class_to_shost(dev);
2106 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2107 struct lpfc_hba *phba = vport->phba;
2108 uint32_t cnt, acnt;
2109
2110 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2111 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2112 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2113 }
2114
2115 /**
2116 * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2117 * @dev: class device that is converted into a Scsi_host.
2118 * @attr: device attribute, not used.
2119 * @buf: text that must be interpreted to determine if npiv is supported.
2120 *
2121 * Description:
2122  * Buffer will contain text indicating NPIV is not supported on the port,
2123  * the port is an NPIV physical port, or it is an NPIV virtual port with
2124 * the id of the vport.
2125 *
2126 * Returns: size of formatted string.
2127 **/
2128 static ssize_t
2129 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2130 char *buf)
2131 {
2132 struct Scsi_Host *shost = class_to_shost(dev);
2133 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2134 struct lpfc_hba *phba = vport->phba;
2135
2136 if (!(phba->max_vpi))
2137 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2138 if (vport->port_type == LPFC_PHYSICAL_PORT)
2139 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2140 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2141 }
2142
2143 /**
2144 * lpfc_poll_show - Return text about poll support for the adapter
2145 * @dev: class device that is converted into a Scsi_host.
2146 * @attr: device attribute, not used.
2147 * @buf: on return contains the cfg_poll in hex.
2148 *
2149 * Notes:
2150 * cfg_poll should be a lpfc_polling_flags type.
2151 *
2152 * Returns: size of formatted string.
2153 **/
2154 static ssize_t
2155 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2156 char *buf)
2157 {
2158 struct Scsi_Host *shost = class_to_shost(dev);
2159 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2160 struct lpfc_hba *phba = vport->phba;
2161
2162 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2163 }
2164
2165 /**
2166 * lpfc_poll_store - Set the value of cfg_poll for the adapter
2167 * @dev: class device that is converted into a Scsi_host.
2168 * @attr: device attribute, not used.
2169 * @buf: one or more lpfc_polling_flags values.
2170 * @count: not used.
2171 *
2172 * Notes:
2173 * buf contents converted to integer and checked for a valid value.
2174 *
2175 * Returns:
2176  * -EINVAL if the buffer cannot be converted or is out of range
2177 * length of the buf on success
2178 **/
2179 static ssize_t
2180 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2181 const char *buf, size_t count)
2182 {
2183 struct Scsi_Host *shost = class_to_shost(dev);
2184 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2185 struct lpfc_hba *phba = vport->phba;
2186 uint32_t creg_val;
2187 uint32_t old_val;
2188 	int val = 0;
2189
2190 if (!isdigit(buf[0]))
2191 return -EINVAL;
2192
2193 if (sscanf(buf, "%i", &val) != 1)
2194 return -EINVAL;
2195
2196 if ((val & 0x3) != val)
2197 return -EINVAL;
2198
2199 if (phba->sli_rev == LPFC_SLI_REV4)
2200 val = 0;
2201
2202 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2203 "3051 lpfc_poll changed from %d to %d\n",
2204 phba->cfg_poll, val);
2205
2206 spin_lock_irq(&phba->hbalock);
2207
2208 old_val = phba->cfg_poll;
2209
2210 if (val & ENABLE_FCP_RING_POLLING) {
2211 if ((val & DISABLE_FCP_RING_INT) &&
2212 !(old_val & DISABLE_FCP_RING_INT)) {
2213 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2214 spin_unlock_irq(&phba->hbalock);
2215 return -EINVAL;
2216 }
2217 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2218 writel(creg_val, phba->HCregaddr);
2219 readl(phba->HCregaddr); /* flush */
2220
2221 lpfc_poll_start_timer(phba);
2222 }
2223 } else if (val != 0x0) {
2224 spin_unlock_irq(&phba->hbalock);
2225 return -EINVAL;
2226 }
2227
2228 if (!(val & DISABLE_FCP_RING_INT) &&
2229 (old_val & DISABLE_FCP_RING_INT))
2230 {
2231 spin_unlock_irq(&phba->hbalock);
2232 del_timer(&phba->fcp_poll_timer);
2233 spin_lock_irq(&phba->hbalock);
2234 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2235 spin_unlock_irq(&phba->hbalock);
2236 return -EINVAL;
2237 }
2238 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2239 writel(creg_val, phba->HCregaddr);
2240 readl(phba->HCregaddr); /* flush */
2241 }
2242
2243 phba->cfg_poll = val;
2244
2245 spin_unlock_irq(&phba->hbalock);
2246
2247 return strlen(buf);
2248 }
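/*
 * A note on accepted values, assuming the usual flag encoding in lpfc.h
 * (ENABLE_FCP_RING_POLLING = 0x1, DISABLE_FCP_RING_INT = 0x2): 0x0 disables
 * polling, 0x1 enables FCP ring polling with interrupts left on, and 0x3
 * enables polling with the ring interrupt masked and the poll timer started.
 * 0x2 on its own is rejected, and on SLI-4 adapters the value is forced
 * back to 0.
 */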
2249
2250 /**
2251 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2252 * @dev: class converted to a Scsi_host structure.
2253 * @attr: device attribute, not used.
2254 * @buf: on return contains the formatted support level.
2255 *
2256 * Description:
2257 * Returns the maximum number of virtual functions a physical function can
2258  * support; 0 will be returned if called on a virtual function.
2259 *
2260 * Returns: size of formatted string.
2261 **/
2262 static ssize_t
2263 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2264 struct device_attribute *attr,
2265 char *buf)
2266 {
2267 struct Scsi_Host *shost = class_to_shost(dev);
2268 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2269 struct lpfc_hba *phba = vport->phba;
2270 uint16_t max_nr_virtfn;
2271
2272 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2273 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2274 }
2275
2276 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2277 {
2278 return val >= min && val <= max;
2279 }
2280
2281 /**
2282  * lpfc_enable_bbcr_set - Sets an attribute value.
2283  * @phba: pointer to the adapter structure.
2284 * @val: integer attribute value.
2285 *
2286 * Description:
2287 * Validates the min and max values then sets the
2288  * adapter config field if in the valid range. Prints an error message
2289 * and does not set the parameter if invalid.
2290 *
2291 * Returns:
2292 * zero on success
2293 * -EINVAL if val is invalid
2294 */
2295 static ssize_t
2296 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2297 {
2298 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2299 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2300 "3068 %s_enable_bbcr changed from %d to %d\n",
2301 LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
2302 phba->cfg_enable_bbcr = val;
2303 return 0;
2304 }
2305 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2306 "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
2307 LPFC_DRIVER_NAME, val);
2308 return -EINVAL;
2309 }
2310
2311 /**
2312 * lpfc_param_show - Return a cfg attribute value in decimal
2313 *
2314 * Description:
2315 * Macro that given an attr e.g. hba_queue_depth expands
2316 * into a function with the name lpfc_hba_queue_depth_show.
2317 *
2318  * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
2319 * @dev: class device that is converted into a Scsi_host.
2320 * @attr: device attribute, not used.
2321 * @buf: on return contains the attribute value in decimal.
2322 *
2323 * Returns: size of formatted string.
2324 **/
2325 #define lpfc_param_show(attr) \
2326 static ssize_t \
2327 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2328 char *buf) \
2329 { \
2330 struct Scsi_Host *shost = class_to_shost(dev);\
2331 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2332 struct lpfc_hba *phba = vport->phba;\
2333 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2334 phba->cfg_##attr);\
2335 }
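/*
 * For illustration, lpfc_param_show(hba_queue_depth) (attribute name used
 * only as an example here) expands to roughly:
 *
 *   static ssize_t
 *   lpfc_hba_queue_depth_show(struct device *dev,
 *                             struct device_attribute *attr, char *buf)
 *   {
 *           struct Scsi_Host *shost = class_to_shost(dev);
 *           struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
 *           struct lpfc_hba *phba = vport->phba;
 *
 *           return scnprintf(buf, PAGE_SIZE, "%d\n",
 *                            phba->cfg_hba_queue_depth);
 *   }
 */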
2336
2337 /**
2338 * lpfc_param_hex_show - Return a cfg attribute value in hex
2339 *
2340 * Description:
2341 * Macro that given an attr e.g. hba_queue_depth expands
2342 * into a function with the name lpfc_hba_queue_depth_show
2343 *
2344  * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
2345 * @dev: class device that is converted into a Scsi_host.
2346 * @attr: device attribute, not used.
2347 * @buf: on return contains the attribute value in hexadecimal.
2348 *
2349 * Returns: size of formatted string.
2350 **/
2351 #define lpfc_param_hex_show(attr) \
2352 static ssize_t \
2353 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2354 char *buf) \
2355 { \
2356 struct Scsi_Host *shost = class_to_shost(dev);\
2357 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2358 struct lpfc_hba *phba = vport->phba;\
2359 	uint val = 0;\
2360 	val = phba->cfg_##attr;\
2361 	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2362 			val);\
2363 }
2364
2365 /**
2366 * lpfc_param_init - Initializes a cfg attribute
2367 *
2368 * Description:
2369 * Macro that given an attr e.g. hba_queue_depth expands
2370 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2371 * takes a default argument, a minimum and maximum argument.
2372 *
2373 * lpfc_##attr##_init: Initializes an attribute.
2374  * @phba: pointer to the adapter structure.
2375 * @val: integer attribute value.
2376 *
2377 * Validates the min and max values then sets the adapter config field
2378 * accordingly, or uses the default if out of range and prints an error message.
2379 *
2380 * Returns:
2381 * zero on success
2382 * -EINVAL if default used
2383 **/
2384 #define lpfc_param_init(attr, default, minval, maxval) \
2385 static int \
2386 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2387 { \
2388 if (lpfc_rangecheck(val, minval, maxval)) {\
2389 phba->cfg_##attr = val;\
2390 return 0;\
2391 }\
2392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2393 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2394 "allowed range is ["#minval", "#maxval"]\n", val); \
2395 phba->cfg_##attr = default;\
2396 return -EINVAL;\
2397 }
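/*
 * For illustration (the numeric bounds here are made up for this example),
 * lpfc_param_init(hba_queue_depth, 32, 1, 8192) generates
 * lpfc_hba_queue_depth_init(), which stores val in
 * phba->cfg_hba_queue_depth when 1 <= val <= 8192 and otherwise logs
 * message 0449, falls back to the default of 32 and returns -EINVAL.
 */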
2398
2399 /**
2400 * lpfc_param_set - Set a cfg attribute value
2401 *
2402 * Description:
2403 * Macro that given an attr e.g. hba_queue_depth expands
2404 * into a function with the name lpfc_hba_queue_depth_set
2405 *
2406 * lpfc_##attr##_set: Sets an attribute value.
2407  * @phba: pointer to the adapter structure.
2408 * @val: integer attribute value.
2409 *
2410 * Description:
2411 * Validates the min and max values then sets the
2412  * adapter config field if in the valid range. Prints an error message
2413 * and does not set the parameter if invalid.
2414 *
2415 * Returns:
2416 * zero on success
2417 * -EINVAL if val is invalid
2418 **/
2419 #define lpfc_param_set(attr, default, minval, maxval) \
2420 static int \
2421 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2422 { \
2423 if (lpfc_rangecheck(val, minval, maxval)) {\
2424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2425 "3052 lpfc_" #attr " changed from %d to %d\n", \
2426 phba->cfg_##attr, val); \
2427 phba->cfg_##attr = val;\
2428 return 0;\
2429 }\
2430 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2431 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2432 "allowed range is ["#minval", "#maxval"]\n", val); \
2433 return -EINVAL;\
2434 }
2435
2436 /**
2437 * lpfc_param_store - Set a vport attribute value
2438 *
2439 * Description:
2440 * Macro that given an attr e.g. hba_queue_depth expands
2441 * into a function with the name lpfc_hba_queue_depth_store.
2442 *
2443  * lpfc_##attr##_store: Set an attribute value.
2444 * @dev: class device that is converted into a Scsi_host.
2445 * @attr: device attribute, not used.
2446 * @buf: contains the attribute value in ascii.
2447 * @count: not used.
2448 *
2449 * Description:
2450 * Convert the ascii text number to an integer, then
2451 * use the lpfc_##attr##_set function to set the value.
2452 *
2453 * Returns:
2454 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2455 * length of buffer upon success.
2456 **/
2457 #define lpfc_param_store(attr) \
2458 static ssize_t \
2459 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2460 const char *buf, size_t count) \
2461 { \
2462 struct Scsi_Host *shost = class_to_shost(dev);\
2463 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2464 struct lpfc_hba *phba = vport->phba;\
2465 uint val = 0;\
2466 if (!isdigit(buf[0]))\
2467 return -EINVAL;\
2468 if (sscanf(buf, "%i", &val) != 1)\
2469 return -EINVAL;\
2470 if (lpfc_##attr##_set(phba, val) == 0) \
2471 return strlen(buf);\
2472 else \
2473 return -EINVAL;\
2474 }
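/*
 * A complete read/write HBA attribute is normally assembled from the four
 * macros above plus a DEVICE_ATTR declaration, e.g. (names and bounds are
 * illustrative only):
 *
 *   lpfc_param_show(hba_queue_depth)
 *   lpfc_param_init(hba_queue_depth, 8192, 32, 8192)
 *   lpfc_param_set(hba_queue_depth, 8192, 32, 8192)
 *   lpfc_param_store(hba_queue_depth)
 *   static DEVICE_ATTR(lpfc_hba_queue_depth, S_IRUGO | S_IWUSR,
 *                      lpfc_hba_queue_depth_show,
 *                      lpfc_hba_queue_depth_store);
 */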
2475
2476 /**
2477 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2478 *
2479 * Description:
2480 * Macro that given an attr e.g. hba_queue_depth expands
2481 * into a function with the name lpfc_hba_queue_depth_show
2482 *
2483 * lpfc_##attr##_show: prints the attribute value in decimal.
2484 * @dev: class device that is converted into a Scsi_host.
2485 * @attr: device attribute, not used.
2486 * @buf: on return contains the attribute value in decimal.
2487 *
2488 * Returns: length of formatted string.
2489 **/
2490 #define lpfc_vport_param_show(attr) \
2491 static ssize_t \
2492 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2493 char *buf) \
2494 { \
2495 struct Scsi_Host *shost = class_to_shost(dev);\
2496 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2497 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2498 }
2499
2500 /**
2501 * lpfc_vport_param_hex_show - Return hex formatted attribute value
2502 *
2503 * Description:
2504 * Macro that given an attr e.g.
2505 * hba_queue_depth expands into a function with the name
2506 * lpfc_hba_queue_depth_show
2507 *
2508 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2509 * @dev: class device that is converted into a Scsi_host.
2510 * @attr: device attribute, not used.
2511 * @buf: on return contains the attribute value in hexadecimal.
2512 *
2513 * Returns: length of formatted string.
2514 **/
2515 #define lpfc_vport_param_hex_show(attr) \
2516 static ssize_t \
2517 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2518 char *buf) \
2519 { \
2520 struct Scsi_Host *shost = class_to_shost(dev);\
2521 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2522 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2523 }
2524
2525 /**
2526 * lpfc_vport_param_init - Initialize a vport cfg attribute
2527 *
2528 * Description:
2529 * Macro that given an attr e.g. hba_queue_depth expands
2530 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2531 * takes a default argument, a minimum and maximum argument.
2532 *
2533 * lpfc_##attr##_init: validates the min and max values then sets the
2534 * adapter config field accordingly, or uses the default if out of range
2535 * and prints an error message.
2536  * @phba: pointer to the adapter structure.
2537 * @val: integer attribute value.
2538 *
2539 * Returns:
2540 * zero on success
2541 * -EINVAL if default used
2542 **/
2543 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2544 static int \
2545 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2546 { \
2547 if (lpfc_rangecheck(val, minval, maxval)) {\
2548 vport->cfg_##attr = val;\
2549 return 0;\
2550 }\
2551 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2552 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2553 "allowed range is ["#minval", "#maxval"]\n", val); \
2554 vport->cfg_##attr = default;\
2555 return -EINVAL;\
2556 }
2557
2558 /**
2559 * lpfc_vport_param_set - Set a vport cfg attribute
2560 *
2561 * Description:
2562 * Macro that given an attr e.g. hba_queue_depth expands
2563 * into a function with the name lpfc_hba_queue_depth_set
2564 *
2565 * lpfc_##attr##_set: validates the min and max values then sets the
2566  * adapter config field if in the valid range. Prints an error message
2567 * and does not set the parameter if invalid.
2568  * @phba: pointer to the adapter structure.
2569 * @val: integer attribute value.
2570 *
2571 * Returns:
2572 * zero on success
2573 * -EINVAL if val is invalid
2574 **/
2575 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2576 static int \
2577 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2578 { \
2579 if (lpfc_rangecheck(val, minval, maxval)) {\
2580 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2581 "3053 lpfc_" #attr \
2582 " changed from %d (x%x) to %d (x%x)\n", \
2583 vport->cfg_##attr, vport->cfg_##attr, \
2584 val, val); \
2585 vport->cfg_##attr = val;\
2586 return 0;\
2587 }\
2588 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2589 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2590 "allowed range is ["#minval", "#maxval"]\n", val); \
2591 return -EINVAL;\
2592 }
2593
2594 /**
2595 * lpfc_vport_param_store - Set a vport attribute
2596 *
2597 * Description:
2598 * Macro that given an attr e.g. hba_queue_depth
2599 * expands into a function with the name lpfc_hba_queue_depth_store
2600 *
2601 * lpfc_##attr##_store: convert the ascii text number to an integer, then
2602 * use the lpfc_##attr##_set function to set the value.
2603  * @dev: class device that is converted into a Scsi_host.
2604 * @buf: contains the attribute value in decimal.
2605 * @count: not used.
2606 *
2607 * Returns:
2608 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2609 * length of buffer upon success.
2610 **/
2611 #define lpfc_vport_param_store(attr) \
2612 static ssize_t \
2613 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2614 const char *buf, size_t count) \
2615 { \
2616 struct Scsi_Host *shost = class_to_shost(dev);\
2617 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2618 uint val = 0;\
2619 if (!isdigit(buf[0]))\
2620 return -EINVAL;\
2621 if (sscanf(buf, "%i", &val) != 1)\
2622 return -EINVAL;\
2623 if (lpfc_##attr##_set(vport, val) == 0) \
2624 return strlen(buf);\
2625 else \
2626 return -EINVAL;\
2627 }
2628
2629
2630 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2631 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2632 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2633 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2634 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2635 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2636 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2637 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2638 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2639 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2640 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2641 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2642 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2643 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2644 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2645 lpfc_link_state_store);
2646 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2647 lpfc_option_rom_version_show, NULL);
2648 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2649 lpfc_num_discovered_ports_show, NULL);
2650 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2651 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2652 static DEVICE_ATTR_RO(lpfc_drvr_version);
2653 static DEVICE_ATTR_RO(lpfc_enable_fip);
2654 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2655 lpfc_board_mode_show, lpfc_board_mode_store);
2656 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2657 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2658 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2659 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2660 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2661 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2662 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2663 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2664 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2665 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2666 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2667 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2668 NULL);
2669
2670 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2671 #define WWN_SZ 8
2672 /**
2673 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2674 * @buf: WWN string.
2675 * @cnt: Length of string.
2676 * @wwn: Array to receive converted wwn value.
2677 *
2678 * Returns:
2679 * -EINVAL if the buffer does not contain a valid wwn
2680 * 0 success
2681 **/
2682 static int
2683 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2684 {
2685 unsigned int i, j;
2686
2687 /* Count may include a LF at end of string */
2688 if (buf[cnt-1] == '\n')
2689 cnt--;
2690
2691 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2692 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2693 return -EINVAL;
2694
2695 memset(wwn, 0, WWN_SZ);
2696
2697 /* Validate and store the new name */
2698 for (i = 0, j = 0; i < 16; i++) {
2699 if ((*buf >= 'a') && (*buf <= 'f'))
2700 j = ((j << 4) | ((*buf++ - 'a') + 10));
2701 else if ((*buf >= 'A') && (*buf <= 'F'))
2702 j = ((j << 4) | ((*buf++ - 'A') + 10));
2703 else if ((*buf >= '0') && (*buf <= '9'))
2704 j = ((j << 4) | (*buf++ - '0'));
2705 else
2706 return -EINVAL;
2707 if (i % 2) {
2708 wwn[i/2] = j & 0xff;
2709 j = 0;
2710 }
2711 }
2712 return 0;
2713 }
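/*
 * lpfc_wwn_set() expects exactly 16 hex digits, optionally prefixed with
 * "x" or "0x" and optionally terminated by a newline. For a hypothetical
 * WWPN, "0x10000000c9a0b1c2", "x10000000c9a0b1c2" and "10000000c9a0b1c2\n"
 * all convert to the same 8-byte WWN; anything shorter, longer, or
 * containing a non-hex character returns -EINVAL.
 */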
2714 /**
2715 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2716 * @dev: class device that is converted into a Scsi_host.
2717 * @attr: device attribute, not used.
2718 * @buf: containing the string lpfc_soft_wwn_key.
2719 * @count: must be size of lpfc_soft_wwn_key.
2720 *
2721 * Returns:
2722 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2723 * length of buf indicates success
2724 **/
2725 static ssize_t
2726 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2727 const char *buf, size_t count)
2728 {
2729 struct Scsi_Host *shost = class_to_shost(dev);
2730 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2731 struct lpfc_hba *phba = vport->phba;
2732 unsigned int cnt = count;
2733 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2734 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2735
2736 /*
2737 * We're doing a simple sanity check for soft_wwpn setting.
2738 * We require that the user write a specific key to enable
2739 * the soft_wwpn attribute to be settable. Once the attribute
2740 * is written, the enable key resets. If further updates are
2741 * desired, the key must be written again to re-enable the
2742 * attribute.
2743 *
2744 * The "key" is not secret - it is a hardcoded string shown
2745 * here. The intent is to protect against the random user or
2746 * application that is just writing attributes.
2747 */
2748 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2750 "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
2751 " be enabled: fawwpn is enabled\n");
2752 return -EINVAL;
2753 }
2754
2755 /* count may include a LF at end of string */
2756 if (buf[cnt-1] == '\n')
2757 cnt--;
2758
2759 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2760 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2761 return -EINVAL;
2762
2763 phba->soft_wwn_enable = 1;
2764
2765 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2766 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2767 phba->brd_no);
2768 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2769 " The soft_wwpn feature is not supported by Broadcom.");
2770
2771 return count;
2772 }
2773 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
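/*
 * Illustrative sequence (sysfs path and WWPN value are assumptions): the
 * key must be written immediately before each soft WWN update, e.g.
 *
 *   echo C99G71SL8032A > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *   echo 0x10000000c9a0b1c2 > /sys/class/scsi_host/host0/lpfc_soft_wwpn
 *
 * Writing lpfc_soft_wwpn clears soft_wwn_enable again and takes the adapter
 * offline/online so the new port name takes effect.
 */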
2774
2775 /**
2776 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2777 * @dev: class device that is converted into a Scsi_host.
2778 * @attr: device attribute, not used.
2779 * @buf: on return contains the wwpn in hexadecimal.
2780 *
2781 * Returns: size of formatted string.
2782 **/
2783 static ssize_t
2784 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2785 char *buf)
2786 {
2787 struct Scsi_Host *shost = class_to_shost(dev);
2788 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2789 struct lpfc_hba *phba = vport->phba;
2790
2791 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2792 (unsigned long long)phba->cfg_soft_wwpn);
2793 }
2794
2795 /**
2796 * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2797  * @dev: class device that is converted into a Scsi_host.
2798 * @attr: device attribute, not used.
2799 * @buf: contains the wwpn in hexadecimal.
2800 * @count: number of wwpn bytes in buf
2801 *
2802 * Returns:
2803 * -EACCES hba reset not enabled, adapter over temp
2804  * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
2805 * -EIO error taking adapter offline or online
2806 * value of count on success
2807 **/
2808 static ssize_t
2809 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2810 const char *buf, size_t count)
2811 {
2812 struct Scsi_Host *shost = class_to_shost(dev);
2813 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2814 struct lpfc_hba *phba = vport->phba;
2815 struct completion online_compl;
2816 int stat1 = 0, stat2 = 0;
2817 unsigned int cnt = count;
2818 u8 wwpn[WWN_SZ];
2819 int rc;
2820
2821 if (!phba->cfg_enable_hba_reset)
2822 return -EACCES;
2823 spin_lock_irq(&phba->hbalock);
2824 if (phba->over_temp_state == HBA_OVER_TEMP) {
2825 spin_unlock_irq(&phba->hbalock);
2826 return -EACCES;
2827 }
2828 spin_unlock_irq(&phba->hbalock);
2829 /* count may include a LF at end of string */
2830 if (buf[cnt-1] == '\n')
2831 cnt--;
2832
2833 if (!phba->soft_wwn_enable)
2834 return -EINVAL;
2835
2836 /* lock setting wwpn, wwnn down */
2837 phba->soft_wwn_enable = 0;
2838
2839 rc = lpfc_wwn_set(buf, cnt, wwpn);
2840 if (rc) {
2841 /* not able to set wwpn, unlock it */
2842 phba->soft_wwn_enable = 1;
2843 return rc;
2844 }
2845
2846 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2847 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2848 if (phba->cfg_soft_wwnn)
2849 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2850
2851 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2852 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2853
2854 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2855 if (stat1)
2856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2857 "0463 lpfc_soft_wwpn attribute set failed to "
2858 "reinit adapter - %d\n", stat1);
2859 init_completion(&online_compl);
2860 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2861 LPFC_EVT_ONLINE);
2862 if (rc == 0)
2863 return -ENOMEM;
2864
2865 wait_for_completion(&online_compl);
2866 if (stat2)
2867 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2868 "0464 lpfc_soft_wwpn attribute set failed to "
2869 "reinit adapter - %d\n", stat2);
2870 return (stat1 || stat2) ? -EIO : count;
2871 }
2872 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
2873
2874 /**
2875 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
2876 * @dev: class device that is converted into a Scsi_host.
2877 * @attr: device attribute, not used.
2878 * @buf: on return contains the wwnn in hexadecimal.
2879 *
2880 * Returns: size of formatted string.
2881 **/
2882 static ssize_t
2883 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2884 char *buf)
2885 {
2886 struct Scsi_Host *shost = class_to_shost(dev);
2887 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2888 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2889 (unsigned long long)phba->cfg_soft_wwnn);
2890 }
2891
2892 /**
2893 * lpfc_soft_wwnn_store - sets the ww node name of the adapter
2894  * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
2895 * @buf: contains the ww node name in hexadecimal.
2896 * @count: number of wwnn bytes in buf.
2897 *
2898 * Returns:
2899  * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
2900 * value of count on success
2901 **/
2902 static ssize_t
2903 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2904 const char *buf, size_t count)
2905 {
2906 struct Scsi_Host *shost = class_to_shost(dev);
2907 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2908 unsigned int cnt = count;
2909 u8 wwnn[WWN_SZ];
2910 int rc;
2911
2912 /* count may include a LF at end of string */
2913 if (buf[cnt-1] == '\n')
2914 cnt--;
2915
2916 if (!phba->soft_wwn_enable)
2917 return -EINVAL;
2918
2919 rc = lpfc_wwn_set(buf, cnt, wwnn);
2920 if (rc) {
2921 /* Allow wwnn to be set many times, as long as the enable
2922 * is set. However, once the wwpn is set, everything locks.
2923 */
2924 return rc;
2925 }
2926
2927 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2928
2929 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2930 "lpfc%d: soft_wwnn set. Value will take effect upon "
2931 "setting of the soft_wwpn\n", phba->brd_no);
2932
2933 return count;
2934 }
2935 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
2936
2937 /**
2938  * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
2939 * Optimized Access Storage (OAS) operations.
2940 * @dev: class device that is converted into a Scsi_host.
2941 * @attr: device attribute, not used.
2942 * @buf: buffer for passing information.
2943 *
2944 * Returns:
2945  * size of formatted string.
2946 **/
2947 static ssize_t
2948 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2949 char *buf)
2950 {
2951 struct Scsi_Host *shost = class_to_shost(dev);
2952 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2953
2954 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2955 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2956 }
2957
2958 /**
2959  * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
2960 * Optimized Access Storage (OAS) operations.
2961 * @dev: class device that is converted into a Scsi_host.
2962 * @attr: device attribute, not used.
2963 * @buf: buffer for passing information.
2964 * @count: Size of the data buffer.
2965 *
2966 * Returns:
2967  * -EINVAL count is invalid, or a wwpn byte is invalid
2968 * -EPERM oas is not supported by hba
2969 * value of count on success
2970 **/
2971 static ssize_t
2972 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2973 const char *buf, size_t count)
2974 {
2975 struct Scsi_Host *shost = class_to_shost(dev);
2976 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2977 unsigned int cnt = count;
2978 uint8_t wwpn[WWN_SZ];
2979 int rc;
2980
2981 if (!phba->cfg_fof)
2982 return -EPERM;
2983
2984 /* count may include a LF at end of string */
2985 if (buf[cnt-1] == '\n')
2986 cnt--;
2987
2988 rc = lpfc_wwn_set(buf, cnt, wwpn);
2989 if (rc)
2990 return rc;
2991
2992 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2993 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2994 if (wwn_to_u64(wwpn) == 0)
2995 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2996 else
2997 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2998 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2999 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3000 return count;
3001 }
3002 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
3003 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
3004
3005 /**
3006  * lpfc_oas_priority_show - Return the priority used for
3007  *				Optimized Access Storage (OAS) operations.
3008 * @dev: class device that is converted into a Scsi_host.
3009 * @attr: device attribute, not used.
3010 * @buf: buffer for passing information.
3011 *
3012 * Returns:
3013  * size of formatted string.
3014 **/
3015 static ssize_t
3016 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
3017 char *buf)
3018 {
3019 struct Scsi_Host *shost = class_to_shost(dev);
3020 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3021
3022 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3023 }
3024
3025 /**
3026  * lpfc_oas_priority_store - Store the priority to be used for
3027  *				Optimized Access Storage (OAS) operations.
3028 * @dev: class device that is converted into a Scsi_host.
3029 * @attr: device attribute, not used.
3030 * @buf: buffer for passing information.
3031 * @count: Size of the data buffer.
3032 *
3033 * Returns:
3034  * -EINVAL count is invalid or the priority value is out of range
3035 * -EPERM oas is not supported by hba
3036 * value of count on success
3037 **/
3038 static ssize_t
3039 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3040 const char *buf, size_t count)
3041 {
3042 struct Scsi_Host *shost = class_to_shost(dev);
3043 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3044 unsigned int cnt = count;
3045 unsigned long val;
3046 int ret;
3047
3048 if (!phba->cfg_fof)
3049 return -EPERM;
3050
3051 /* count may include a LF at end of string */
3052 if (buf[cnt-1] == '\n')
3053 cnt--;
3054
3055 ret = kstrtoul(buf, 0, &val);
3056 if (ret || (val > 0x7f))
3057 return -EINVAL;
3058
3059 if (val)
3060 phba->cfg_oas_priority = (uint8_t)val;
3061 else
3062 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3063 return count;
3064 }
3065 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3066 lpfc_oas_priority_show, lpfc_oas_priority_store);
3067
3068 /**
3069  * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3070 * for Optimized Access Storage (OAS) operations.
3071 * @dev: class device that is converted into a Scsi_host.
3072 * @attr: device attribute, not used.
3073 * @buf: buffer for passing information.
3074 *
3075 * Returns:
3076  * size of formatted string.
3077 **/
3078 static ssize_t
3079 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3080 char *buf)
3081 {
3082 struct Scsi_Host *shost = class_to_shost(dev);
3083 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3084
3085 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3086 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3087 }
3088
3089 /**
3090  * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3091 * for Optimized Access Storage (OAS) operations.
3092 * @dev: class device that is converted into a Scsi_host.
3093 * @attr: device attribute, not used.
3094 * @buf: buffer for passing information.
3095 * @count: Size of the data buffer.
3096 *
3097 * Returns:
3098  * -EINVAL count is invalid, or a wwpn byte is invalid
3099 * -EPERM oas is not supported by hba
3100 * value of count on success
3101 **/
3102 static ssize_t
3103 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3104 const char *buf, size_t count)
3105 {
3106 struct Scsi_Host *shost = class_to_shost(dev);
3107 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3108 unsigned int cnt = count;
3109 uint8_t wwpn[WWN_SZ];
3110 int rc;
3111
3112 if (!phba->cfg_fof)
3113 return -EPERM;
3114
3115 /* count may include a LF at end of string */
3116 if (buf[cnt-1] == '\n')
3117 cnt--;
3118
3119 rc = lpfc_wwn_set(buf, cnt, wwpn);
3120 if (rc)
3121 return rc;
3122
3123 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3124 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3125 if (wwn_to_u64(wwpn) == 0)
3126 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3127 else
3128 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3129 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3130 if (phba->cfg_oas_priority == 0)
3131 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3132 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3133 return count;
3134 }
3135 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3136 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3137
3138 /**
3139 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3140  *					that will be applied to luns
3141 * for Optimized Access Storage (OAS) operations.
3142 * @dev: class device that is converted into a Scsi_host.
3143 * @attr: device attribute, not used.
3144 * @buf: buffer for passing information.
3145 *
3146 * Returns:
3147 * size of formatted string.
3148 **/
3149 static ssize_t
3150 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3151 char *buf)
3152 {
3153 struct Scsi_Host *shost = class_to_shost(dev);
3154 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3155
3156 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3157 }
3158
3159 /**
3160 * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3161  *					that will be applied to luns
3162 * for Optimized Access Storage (OAS) operations.
3163 * @dev: class device that is converted into a Scsi_host.
3164 * @attr: device attribute, not used.
3165 * @buf: buffer for passing information.
3166 * @count: Size of the data buffer.
3167 *
3168 * Returns:
3169  * -EINVAL count is invalid or the value is not 0 or 1
3170 * -EPERM oas is not supported by hba
3171 * value of count on success
3172 **/
3173 static ssize_t
3174 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3175 const char *buf, size_t count)
3176 {
3177 struct Scsi_Host *shost = class_to_shost(dev);
3178 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3179 int val = 0;
3180
3181 if (!phba->cfg_fof)
3182 return -EPERM;
3183
3184 if (!isdigit(buf[0]))
3185 return -EINVAL;
3186
3187 if (sscanf(buf, "%i", &val) != 1)
3188 return -EINVAL;
3189
3190 if ((val != 0) && (val != 1))
3191 return -EINVAL;
3192
3193 phba->cfg_oas_lun_state = val;
3194 return strlen(buf);
3195 }
3196 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3197 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
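/*
 * Illustrative OAS staging sequence (sysfs path and WWPNs are assumptions):
 * the vport and target WWPNs and the desired state are written first, and
 * the lun itself is then written through the lun store handler defined
 * below, e.g.
 *
 *   echo 0x0000000000000000 > /sys/class/scsi_host/host0/lpfc_xlane_vpt
 *   echo 0x21000024ff454647 > /sys/class/scsi_host/host0/lpfc_xlane_tgt
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_xlane_lun_state
 *
 * A WWPN of all zeroes selects "any vport"/"any target", and every one of
 * these writes returns -EPERM unless the adapter supports OAS
 * (phba->cfg_fof).
 */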
3198
3199 /**
3200 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3201 * Storage (OAS) lun returned by the
3202 * lpfc_oas_lun_show function.
3203 * @dev: class device that is converted into a Scsi_host.
3204 * @attr: device attribute, not used.
3205 * @buf: buffer for passing information.
3206 *
3207 * Returns:
3208 * size of formatted string.
3209 **/
3210 static ssize_t
3211 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3212 char *buf)
3213 {
3214 struct Scsi_Host *shost = class_to_shost(dev);
3215 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3216
3217 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3218 return -EFAULT;
3219
3220 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3221 }
3222 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3223 lpfc_oas_lun_status_show, NULL);
3224
3225
3226 /**
3227 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3228 * (OAS) operations.
3229 * @phba: lpfc_hba pointer.
3230  * @vpt_wwpn: wwpn of the vport associated with the lun.
 * @tgt_wwpn: wwpn of the fcp target associated with the lun.
3231  * @lun: the fc lun for setting oas state.
3232  * @oas_state: the oas state to be set to the lun.
 * @pri: the oas priority to be set for the lun.
3233 *
3234 * Returns:
3235 * SUCCESS : 0
3236  * -EPERM OAS is not enabled or not supported by this port.
 * -ENOMEM enabling the lun for OAS operations failed.
3237 *
3238 */
3239 static int
3240 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3241 uint8_t tgt_wwpn[], uint64_t lun,
3242 uint32_t oas_state, uint8_t pri)
3243 {
3244
3245 int rc = 0;
3246
3247 if (!phba->cfg_fof)
3248 return -EPERM;
3249
3250 if (oas_state) {
3251 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3252 (struct lpfc_name *)tgt_wwpn,
3253 lun, pri))
3254 rc = -ENOMEM;
3255 } else {
3256 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3257 (struct lpfc_name *)tgt_wwpn, lun, pri);
3258 }
3259 return rc;
3260
3261 }
3262
3263 /**
3264 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3265 * Access Storage (OAS) operations.
3266 * @phba: lpfc_hba pointer.
3267 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3268 * @tgt_wwpn: wwpn of the target associated with the returned lun
3269  * @lun_status: status of the returned lun
 * @lun_pri: priority of the returned lun
3270 *
3271 * Returns the first or next lun enabled for OAS operations for the vport/target
3272  * specified. If a lun is found, its vport wwpn, target wwpn and status are
3273 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3274 *
3275 * Return:
3276 * lun that is OAS enabled for the vport/target
3277 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3278 */
3279 static uint64_t
3280 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3281 uint8_t tgt_wwpn[], uint32_t *lun_status,
3282 uint32_t *lun_pri)
3283 {
3284 uint64_t found_lun;
3285
3286 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3287 return NOT_OAS_ENABLED_LUN;
3288 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3289 phba->sli4_hba.oas_next_vpt_wwpn,
3290 (struct lpfc_name *)
3291 phba->sli4_hba.oas_next_tgt_wwpn,
3292 &phba->sli4_hba.oas_next_lun,
3293 (struct lpfc_name *)vpt_wwpn,
3294 (struct lpfc_name *)tgt_wwpn,
3295 &found_lun, lun_status, lun_pri))
3296 return found_lun;
3297 else
3298 return NOT_OAS_ENABLED_LUN;
3299 }
3300
3301 /**
3302 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3303 * @phba: lpfc_hba pointer.
3304 * @vpt_wwpn: vport wwpn by reference.
3305 * @tgt_wwpn: target wwpn by reference.
3306 * @lun: the fc lun for setting oas state.
3307 * @oas_state: the oas state to be set to the oas_lun.
3308 *
3309 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3310 * a lun for OAS operations.
3311 *
3312 * Return:
3313 * SUCCESS: 0
3314 * -ENOMEM: failed to enable a lun for OAS operations
3315 * -EPERM: OAS is not enabled
3316 */
3317 static ssize_t
3318 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3319 uint8_t tgt_wwpn[], uint64_t lun,
3320 uint32_t oas_state, uint8_t pri)
3321 {
3322
3323 int rc;
3324
3325 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3326 oas_state, pri);
3327 return rc;
3328 }
3329
3330 /**
3331 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3332 * @dev: class device that is converted into a Scsi_host.
3333 * @attr: device attribute, not used.
3334 * @buf: buffer for passing information.
3335 *
3336 * This routine returns a lun enabled for OAS each time the function
3337 * is called.
3338 *
3339 * Returns:
3340 * SUCCESS: size of formatted string.
3341 * -EFAULT: target or vport wwpn was not set properly.
3342 * -EPERM: oas is not enabled.
3343 **/
3344 static ssize_t
3345 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3346 char *buf)
3347 {
3348 struct Scsi_Host *shost = class_to_shost(dev);
3349 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3350
3351 uint64_t oas_lun;
3352 int len = 0;
3353
3354 if (!phba->cfg_fof)
3355 return -EPERM;
3356
3357 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3358 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3359 return -EFAULT;
3360
3361 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3362 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3363 return -EFAULT;
3364
3365 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3366 phba->cfg_oas_tgt_wwpn,
3367 &phba->cfg_oas_lun_status,
3368 &phba->cfg_oas_priority);
3369 if (oas_lun != NOT_OAS_ENABLED_LUN)
3370 phba->cfg_oas_flags |= OAS_LUN_VALID;
3371
3372 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3373
3374 return len;
3375 }
3376
3377 /**
3378 * lpfc_oas_lun_store - Sets the OAS state for lun
3379 * @dev: class device that is converted into a Scsi_host.
3380 * @attr: device attribute, not used.
3381 * @buf: buffer for passing information.
3382 *
3383 * This function sets the OAS state for lun. Before this function is called,
3384 * the vport wwpn, target wwpn, and oas state need to be set.
3385 *
3386 * Returns:
3387 * SUCCESS: size of formatted string.
3388 * -EFAULT: target or vport wwpn was not set properly.
3389 * -EPERM: oas is not enabled.
3391 **/
3392 static ssize_t
3393 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3394 const char *buf, size_t count)
3395 {
3396 struct Scsi_Host *shost = class_to_shost(dev);
3397 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3398 uint64_t scsi_lun;
3399 uint32_t pri;
3400 ssize_t rc;
3401
3402 if (!phba->cfg_fof)
3403 return -EPERM;
3404
3405 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3406 return -EFAULT;
3407
3408 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3409 return -EFAULT;
3410
3411 if (!isdigit(buf[0]))
3412 return -EINVAL;
3413
3414 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3415 return -EINVAL;
3416
3417 pri = phba->cfg_oas_priority;
3418 if (pri == 0)
3419 pri = phba->cfg_XLanePriority;
3420
3421 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3422 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3423 "priority 0x%x with oas state %d\n",
3424 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3425 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3426 pri, phba->cfg_oas_lun_state);
3427
3428 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3429 phba->cfg_oas_tgt_wwpn, scsi_lun,
3430 phba->cfg_oas_lun_state, pri);
3431 if (rc)
3432 return rc;
3433
3434 return count;
3435 }
3436 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3437 lpfc_oas_lun_show, lpfc_oas_lun_store);
3438
3439 int lpfc_enable_nvmet_cnt;
3440 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3441 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3442 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3443 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3444 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as an NVME Target");
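
/*
 * Usage sketch (the WWPN below is hypothetical; substitute the WWPN of the
 * physical port that should run in NVME target mode):
 *
 *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779
 */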
3445
3446 static int lpfc_poll = 0;
3447 module_param(lpfc_poll, int, S_IRUGO);
3448 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3449 " 0 - none,"
3450 " 1 - poll with interrupts enabled"
3451 " 3 - poll and disable FCP ring interrupts");
3452
3453 static DEVICE_ATTR_RW(lpfc_poll);
3454
3455 int lpfc_no_hba_reset_cnt;
3456 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3457 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3458 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3459 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
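
/*
 * Usage sketch (WWPNs are hypothetical): exclude two adapters from HBA reset
 * by listing their port WWPNs at module load time:
 *
 *   modprobe lpfc lpfc_no_hba_reset=0x10000090fa942779,0x10000090fa94277a
 */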
3460
3461 LPFC_ATTR(sli_mode, 0, 0, 3,
3462 "SLI mode selector:"
3463 " 0 - auto (SLI-3 if supported),"
3464 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
3465 " 3 - select SLI-3");
3466
3467 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3468 "Enable NPIV functionality");
3469
3470 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3471 "FCF Fast failover=1 Priority failover=2");
3472
3473 /*
3474 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3475 # 0x0 = disabled, XRI/OXID use not tracked.
3476 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3477 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3478 */
3479 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3480 "Enable RRQ functionality");
3481
3482 /*
3483 # lpfc_suppress_link_up: Bring link up at initialization
3484 # 0x0 = bring link up (issue MBX_INIT_LINK)
3485 # 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
3486 # 0x2 = never bring up link
3487 # Default value is 0.
3488 */
3489 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3490 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3491 "Suppress Link Up at initialization");
3492
3493 static ssize_t
3494 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3495 {
3496 struct Scsi_Host *shost = class_to_shost(dev);
3497 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3498
3499 return scnprintf(buf, PAGE_SIZE, "%d\n",
3500 phba->sli4_hba.pc_sli4_params.pls);
3501 }
3502 static DEVICE_ATTR(pls, 0444,
3503 lpfc_pls_show, NULL);
3504
3505 static ssize_t
3506 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3507 {
3508 struct Scsi_Host *shost = class_to_shost(dev);
3509 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3510
3511 return scnprintf(buf, PAGE_SIZE, "%d\n",
3512 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3513 }
3514 static DEVICE_ATTR(pt, 0444,
3515 lpfc_pt_show, NULL);
3516
3517 /*
3518 # lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3519 # 1 - (1024)
3520 # 2 - (2048)
3521 # 3 - (3072)
3522 # 4 - (4096)
3523 # 5 - (5120)
3524 */
3525 static ssize_t
3526 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3527 {
3528 struct Scsi_Host *shost = class_to_shost(dev);
3529 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3530
3531 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3532 }
3533
3534 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3535 lpfc_iocb_hw_show, NULL);
3536 static ssize_t
3537 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3538 {
3539 struct Scsi_Host *shost = class_to_shost(dev);
3540 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3541 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3542
3543 return scnprintf(buf, PAGE_SIZE, "%d\n",
3544 pring ? pring->txq_max : 0);
3545 }
3546
3547 static DEVICE_ATTR(txq_hw, S_IRUGO,
3548 lpfc_txq_hw_show, NULL);
3549 static ssize_t
3550 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3551 char *buf)
3552 {
3553 struct Scsi_Host *shost = class_to_shost(dev);
3554 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3555 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3556
3557 return scnprintf(buf, PAGE_SIZE, "%d\n",
3558 pring ? pring->txcmplq_max : 0);
3559 }
3560
3561 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3562 lpfc_txcmplq_hw_show, NULL);
3563
3564 /*
3565 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3566 # until the timer expires. Value range is [0,255]. Default value is 30.
3567 */
3568 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3569 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3570 module_param(lpfc_nodev_tmo, int, 0);
3571 MODULE_PARM_DESC(lpfc_nodev_tmo,
3572 "Seconds driver will hold I/O waiting "
3573 "for a device to come back");
3574
3575 /**
3576 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3577 * @dev: class converted to a Scsi_host structure.
3578 * @attr: device attribute, not used.
3579 * @buf: on return contains the dev loss timeout in decimal.
3580 *
3581 * Returns: size of formatted string.
3582 **/
3583 static ssize_t
3584 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3585 char *buf)
3586 {
3587 struct Scsi_Host *shost = class_to_shost(dev);
3588 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3589
3590 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3591 }
3592
3593 /**
3594 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3595 * @vport: lpfc vport structure pointer.
3596 * @val: contains the nodev timeout value.
3597 *
3598 * Description:
3599 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
3600 * a kernel error message is printed and zero is returned.
3601 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3602 * Otherwise nodev tmo is set to the default value.
3603 *
3604 * Returns:
3605 * zero if already set or if val is in range
3606 * -EINVAL val out of range
3607 **/
3608 static int
3609 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3610 {
3611 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3612 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3613 if (val != LPFC_DEF_DEVLOSS_TMO)
3614 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3615 "0407 Ignoring lpfc_nodev_tmo module "
3616 "parameter because lpfc_devloss_tmo "
3617 "is set.\n");
3618 return 0;
3619 }
3620
3621 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3622 vport->cfg_nodev_tmo = val;
3623 vport->cfg_devloss_tmo = val;
3624 return 0;
3625 }
3626 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3627 "0400 lpfc_nodev_tmo attribute cannot be set to"
3628 " %d, allowed range is [%d, %d]\n",
3629 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3630 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3631 return -EINVAL;
3632 }
3633
3634 /**
3635 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3636 * @vport: lpfc vport structure pointer.
3637 *
3638 * Description:
3639 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3640 **/
3641 static void
3642 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3643 {
3644 struct Scsi_Host *shost;
3645 struct lpfc_nodelist *ndlp;
3646 #if (IS_ENABLED(CONFIG_NVME_FC))
3647 struct lpfc_nvme_rport *rport;
3648 struct nvme_fc_remote_port *remoteport = NULL;
3649 #endif
3650
3651 shost = lpfc_shost_from_vport(vport);
3652 spin_lock_irq(shost->host_lock);
3653 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3654 if (!NLP_CHK_NODE_ACT(ndlp))
3655 continue;
3656 if (ndlp->rport)
3657 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3658 #if (IS_ENABLED(CONFIG_NVME_FC))
3659 spin_lock(&vport->phba->hbalock);
3660 rport = lpfc_ndlp_get_nrport(ndlp);
3661 if (rport)
3662 remoteport = rport->remoteport;
3663 spin_unlock(&vport->phba->hbalock);
3664 if (rport && remoteport)
3665 nvme_fc_set_remoteport_devloss(remoteport,
3666 vport->cfg_devloss_tmo);
3667 #endif
3668 }
3669 spin_unlock_irq(shost->host_lock);
3670 }
3671
3672 /**
3673 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3674 * @vport: lpfc vport structure pointer.
3675 * @val: contains the tmo value.
3676 *
3677 * Description:
3678 * If the devloss tmo is already set or the vport dev loss tmo has changed
3679 * then a kernel error message is printed and zero is returned.
3680 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3681 * Otherwise nodev tmo is set to the default value.
3682 *
3683 * Returns:
3684 * zero if already set or if val is in range
3685 * -EINVAL val out of range
3686 **/
3687 static int
3688 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3689 {
3690 if (vport->dev_loss_tmo_changed ||
3691 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3692 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3693 "0401 Ignoring change to lpfc_nodev_tmo "
3694 "because lpfc_devloss_tmo is set.\n");
3695 return 0;
3696 }
3697 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3698 vport->cfg_nodev_tmo = val;
3699 vport->cfg_devloss_tmo = val;
3700 /*
3701 * For compat: set the fc_host dev loss so new rports
3702 * will get the value.
3703 */
3704 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3705 lpfc_update_rport_devloss_tmo(vport);
3706 return 0;
3707 }
3708 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3709 "0403 lpfc_nodev_tmo attribute cannot be set to "
3710 "%d, allowed range is [%d, %d]\n",
3711 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3712 return -EINVAL;
3713 }
3714
3715 lpfc_vport_param_store(nodev_tmo)
3716
3717 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3718
3719 /*
3720 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3721 # disappear until the timer expires. Value range is [0,255]. Default
3722 # value is 30.
3723 */
3724 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3725 MODULE_PARM_DESC(lpfc_devloss_tmo,
3726 "Seconds driver will hold I/O waiting "
3727 "for a device to come back");
3728 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3729 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3730 lpfc_vport_param_show(devloss_tmo)
3731
3732 /**
3733 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3734 * @vport: lpfc vport structure pointer.
3735 * @val: contains the tmo value.
3736 *
3737 * Description:
3738 * If val is in a valid range then set the vport nodev tmo,
3739 * devloss tmo, also set the vport dev loss tmo changed flag.
3740 * Else a kernel error message is printed.
3741 *
3742 * Returns:
3743 * zero if val is in range
3744 * -EINVAL val out of range
3745 **/
3746 static int
3747 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3748 {
3749 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3750 vport->cfg_nodev_tmo = val;
3751 vport->cfg_devloss_tmo = val;
3752 vport->dev_loss_tmo_changed = 1;
3753 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3754 lpfc_update_rport_devloss_tmo(vport);
3755 return 0;
3756 }
3757
3758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3759 "0404 lpfc_devloss_tmo attribute cannot be set to "
3760 "%d, allowed range is [%d, %d]\n",
3761 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3762 return -EINVAL;
3763 }
3764
3765 lpfc_vport_param_store(devloss_tmo)
3766 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3767
3768 /*
3769 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3770 * lpfc_suppress_rsp = 0 Disable
3771 * lpfc_suppress_rsp = 1 Enable (default)
3772 *
3773 */
3774 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3775 "Enable suppress rsp feature is firmware supports it");
3776
3777 /*
3778 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3779 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3780 * lpfc_nvmet_mrq = 1 use a single RQ pair
3781 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3782 *
3783 */
3784 LPFC_ATTR_R(nvmet_mrq,
3785 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3786 "Specify number of RQ pairs for processing NVMET cmds");
3787
3788 /*
3789 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3790 * to each NVMET RQ. Range 64 to 2048, default is 512.
3791 */
3792 LPFC_ATTR_R(nvmet_mrq_post,
3793 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3794 LPFC_NVMET_RQE_DEF_COUNT,
3795 "Specify number of RQ buffers to initially post");
3796
3797 /*
3798 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3799 * Supported Values: 1 - register just FCP
3800 * 3 - register both FCP and NVME
3801 * Supported values are [1,3]. Default value is 3
3802 */
3803 LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
3804 LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
3805 "Enable FC4 Protocol support - FCP / NVME");
3806
3807 /*
3808 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3809 # deluged with LOTS of information.
3810 # You can set a bit mask to record specific types of verbose messages:
3811 # See lpfc_logmsg.h for definitions.
3812 */
3813 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3814 "Verbose logging bit-mask");
3815
3816 /*
3817 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3818 # objects that have been registered with the nameserver after login.
3819 */
3820 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3821 "Deregister nameserver objects before LOGO");
3822
3823 /*
3824 # lun_queue_depth: This parameter is used to limit the number of outstanding
3825 # commands per FCP LUN.
3826 */
3827 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3828 "Max number of FCP commands we can queue to a specific LUN");
3829
3830 /*
3831 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3832 # commands per target port. Value range is [10,65535]. Default value is 65535.
3833 */
3834 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3835 module_param(lpfc_tgt_queue_depth, uint, 0444);
3836 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3837 lpfc_vport_param_show(tgt_queue_depth);
3838 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3839 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3840
3841 /**
3842 * lpfc_tgt_queue_depth_set - Sets the target queue depth for a vport
3843 * @vport: lpfc vport structure pointer.
3844 * @val: integer attribute value.
3845 *
3846 * Description: Sets the parameter to the new value.
3847 *
3848 * Returns:
3849 * zero on success
3850 * -EINVAL if val is invalid
3851 */
3852 static int
3853 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3854 {
3855 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3856 struct lpfc_nodelist *ndlp;
3857
3858 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3859 return -EINVAL;
3860
3861 if (val == vport->cfg_tgt_queue_depth)
3862 return 0;
3863
3864 spin_lock_irq(shost->host_lock);
3865 vport->cfg_tgt_queue_depth = val;
3866
3867 /* Next loop thru nodelist and change cmd_qdepth */
3868 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3869 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3870
3871 spin_unlock_irq(shost->host_lock);
3872 return 0;
3873 }
3874
3875 lpfc_vport_param_store(tgt_queue_depth);
3876 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3877
3878 /*
3879 # hba_queue_depth: This parameter is used to limit the number of outstanding
3880 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
3881 # value is greater than the maximum number of exchanges supported by the HBA,
3882 # then the maximum number of exchanges supported by the HBA is used to determine
3883 # the hba_queue_depth.
3884 */
3885 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3886 "Max number of FCP commands we can queue to a lpfc HBA");
3887
3888 /*
3889 # peer_port_login: This parameter allows/prevents logins
3890 # between peer ports hosted on the same physical port.
3891 # When this parameter is set 0 peer ports of same physical port
3892 # are not allowed to login to each other.
3893 # When this parameter is set 1 peer ports of same physical port
3894 # are allowed to login to each other.
3895 # Default value of this parameter is 0.
3896 */
3897 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3898 "Allow peer ports on the same physical port to login to each "
3899 "other.");
3900
3901 /*
3902 # restrict_login: This parameter allows/prevents logins
3903 # between Virtual Ports and remote initiators.
3904 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
3905 # other initiators and will attempt to PLOGI all remote ports.
3906 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
3907 # remote ports and will not attempt to PLOGI to other initiators.
3908 # This parameter does not restrict to the physical port.
3909 # This parameter does not restrict logins to Fabric resident remote ports.
3910 # Default value of this parameter is 1.
3911 */
3912 static int lpfc_restrict_login = 1;
3913 module_param(lpfc_restrict_login, int, S_IRUGO);
3914 MODULE_PARM_DESC(lpfc_restrict_login,
3915 "Restrict virtual ports login to remote initiators.");
3916 lpfc_vport_param_show(restrict_login);
3917
3918 /**
3919 * lpfc_restrict_login_init - Set the vport restrict login flag
3920 * @vport: lpfc vport structure pointer.
3921 * @val: contains the restrict login value.
3922 *
3923 * Description:
3924 * If val is not in a valid range then log a kernel error message and set
3925 * the vport restrict login to one.
3926 * If the port type is physical clear the restrict login flag and return.
3927 * Else set the restrict login flag to val.
3928 *
3929 * Returns:
3930 * zero if val is in range
3931 * -EINVAL val out of range
3932 **/
3933 static int
3934 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3935 {
3936 if (val < 0 || val > 1) {
3937 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3938 "0422 lpfc_restrict_login attribute cannot "
3939 "be set to %d, allowed range is [0, 1]\n",
3940 val);
3941 vport->cfg_restrict_login = 1;
3942 return -EINVAL;
3943 }
3944 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3945 vport->cfg_restrict_login = 0;
3946 return 0;
3947 }
3948 vport->cfg_restrict_login = val;
3949 return 0;
3950 }
3951
3952 /**
3953 * lpfc_restrict_login_set - Set the vport restrict login flag
3954 * @vport: lpfc vport structure pointer.
3955 * @val: contains the restrict login value.
3956 *
3957 * Description:
3958 * If val is not in a valid range then log a kernel error message and set
3959 * the vport restrict login to one.
3960 * If the port type is physical and the val is not zero log a kernel
3961 * error message, clear the restrict login flag and return zero.
3962 * Else set the restrict login flag to val.
3963 *
3964 * Returns:
3965 * zero if val is in range
3966 * -EINVAL val out of range
3967 **/
3968 static int
3969 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3970 {
3971 if (val < 0 || val > 1) {
3972 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3973 "0425 lpfc_restrict_login attribute cannot "
3974 "be set to %d, allowed range is [0, 1]\n",
3975 val);
3976 vport->cfg_restrict_login = 1;
3977 return -EINVAL;
3978 }
3979 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3980 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3981 "0468 lpfc_restrict_login must be 0 for "
3982 "Physical ports.\n");
3983 vport->cfg_restrict_login = 0;
3984 return 0;
3985 }
3986 vport->cfg_restrict_login = val;
3987 return 0;
3988 }
3989 lpfc_vport_param_store(restrict_login);
3990 static DEVICE_ATTR_RW(lpfc_restrict_login);
3991
3992 /*
3993 # Some disk devices have a "select ID" or "select Target" capability.
3994 # From a protocol standpoint "select ID" usually means select the
3995 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative
3996 # annex" which contains a table that maps a "select ID" (a number
3997 # between 0 and 7F) to an ALPA. By default, for compatibility with
3998 # older drivers, the lpfc driver scans this table from low ALPA to high
3999 # ALPA.
4000 #
4001 # Turning on the scan-down variable (on = 1, off = 0) will
4002 # cause the lpfc driver to use an inverted table, effectively
4003 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
4004 #
4005 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
4006 # and will not work across a fabric. Also this parameter will take
4007 # effect only in the case when ALPA map is not available.)
4008 */
4009 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
4010 "Start scanning for devices from highest ALPA to lowest");
4011
4012 /*
4013 # lpfc_topology: link topology for init link
4014 # 0x0 = attempt loop mode then point-to-point
4015 # 0x01 = internal loopback mode
4016 # 0x02 = attempt point-to-point mode only
4017 # 0x04 = attempt loop mode only
4018 # 0x06 = attempt point-to-point mode then loop
4019 # Set point-to-point mode if you want to run as an N_Port.
4020 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4021 # Default value is 0.
4022 */
4023 LPFC_ATTR(topology, 0, 0, 6,
4024 "Select Fibre Channel topology");
4025
4026 /**
4027 * lpfc_topology_set - Set the adapters topology field
4028 * @phba: lpfc_hba pointer.
4029 * @val: topology value.
4030 *
4031 * Description:
4032 * If val is in a valid range then set the adapter's topology field and
4033 * issue a lip; if the lip fails reset the topology to the old value.
4034 *
4035 * If the value is not in range log a kernel error message and return an error.
4036 *
4037 * Returns:
4038 * zero if val is in range and lip okay
4039 * non-zero return value from lpfc_issue_lip()
4040 * -EINVAL val out of range
4041 **/
4042 static ssize_t
4043 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4044 const char *buf, size_t count)
4045 {
4046 struct Scsi_Host *shost = class_to_shost(dev);
4047 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4048 struct lpfc_hba *phba = vport->phba;
4049 int val = 0;
4050 int nolip = 0;
4051 const char *val_buf = buf;
4052 int err;
4053 uint32_t prev_val;
4054
4055 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4056 nolip = 1;
4057 val_buf = &buf[strlen("nolip ")];
4058 }
4059
4060 if (!isdigit(val_buf[0]))
4061 return -EINVAL;
4062 if (sscanf(val_buf, "%i", &val) != 1)
4063 return -EINVAL;
4064
4065 if (val >= 0 && val <= 6) {
4066 prev_val = phba->cfg_topology;
4067 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4068 val == 4) {
4069 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4070 "3113 Loop mode not supported at speed %d\n",
4071 val);
4072 return -EINVAL;
4073 }
4074 /*
4075 * The 'topology' is not a configurable parameter if :
4076 * - persistent topology enabled
4077 * - G7/G6 with no private loop support
4078 */
4079
4080 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4081 (!phba->sli4_hba.pc_sli4_params.pls &&
4082 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4083 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
4084 val == 4) {
4085 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4086 "3114 Loop mode not supported\n");
4087 return -EINVAL;
4088 }
4089 phba->cfg_topology = val;
4090 if (nolip)
4091 return strlen(buf);
4092
4093 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4094 "3054 lpfc_topology changed from %d to %d\n",
4095 prev_val, val);
4096 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4097 phba->fc_topology_changed = 1;
4098 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4099 if (err) {
4100 phba->cfg_topology = prev_val;
4101 return -EINVAL;
4102 } else
4103 return strlen(buf);
4104 }
4105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4106 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4107 "allowed range is [0, 6]\n",
4108 phba->brd_no, val);
4109 return -EINVAL;
4110 }
4111
4112 lpfc_param_show(topology)
4113 static DEVICE_ATTR_RW(lpfc_topology);
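
/*
 * Usage sketch (host number hypothetical): select point-to-point-then-loop
 * and let the store routine issue a LIP, or stage a new topology without a
 * LIP using the "nolip " prefix handled above:
 *
 *   echo 6 > /sys/class/scsi_host/host0/lpfc_topology
 *   echo "nolip 2" > /sys/class/scsi_host/host0/lpfc_topology
 */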
4114
4115 /**
4116 * lpfc_static_vport_show - Read callback function for
4117 * lpfc_static_vport sysfs file.
4118 * @dev: Pointer to class device object.
4119 * @attr: device attribute structure.
4120 * @buf: Data buffer.
4121 *
4122 * This function is the read callback function for the
4123 * lpfc_static_vport sysfs file. The lpfc_static_vport
4124 * sysfs file reports the manageability of the vport.
4125 **/
4126 static ssize_t
4127 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4128 char *buf)
4129 {
4130 struct Scsi_Host *shost = class_to_shost(dev);
4131 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4132 if (vport->vport_flag & STATIC_VPORT)
4133 sprintf(buf, "1\n");
4134 else
4135 sprintf(buf, "0\n");
4136
4137 return strlen(buf);
4138 }
4139
4140 /*
4141 * Sysfs attribute to report whether the vport is a static vport.
4142 */
4143 static DEVICE_ATTR_RO(lpfc_static_vport);
4144
4145 /**
4146 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4147 * @dev: Pointer to class device.
4148 * @buf: Data buffer.
4149 * @count: Size of the data buffer.
4150 *
4151 * This function gets called when a user writes to the lpfc_stat_data_ctrl
4152 * sysfs file. It parses the command written to the sysfs file and takes
4153 * the appropriate action. These commands are used for controlling driver
4154 * statistical data collection. A usage example follows the function body.
4155 * The following are the commands this function handles:
4156 *
4157 * setbucket <bucket_type> <base> <step>
4158 * = Set the latency buckets.
4159 * destroybucket = destroy all the buckets.
4160 * start = start data collection
4161 * stop = stop data collection
4162 * reset = reset the collected data
4163 **/
4164 static ssize_t
4165 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4166 const char *buf, size_t count)
4167 {
4168 struct Scsi_Host *shost = class_to_shost(dev);
4169 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4170 struct lpfc_hba *phba = vport->phba;
4171 #define LPFC_MAX_DATA_CTRL_LEN 1024
4172 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4173 unsigned long i;
4174 char *str_ptr, *token;
4175 struct lpfc_vport **vports;
4176 struct Scsi_Host *v_shost;
4177 char *bucket_type_str, *base_str, *step_str;
4178 unsigned long base, step, bucket_type;
4179
4180 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4181 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4182 return -EINVAL;
4183
4184 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4185 str_ptr = &bucket_data[0];
4186 /* Ignore this token - this is command token */
4187 token = strsep(&str_ptr, "\t ");
4188 if (!token)
4189 return -EINVAL;
4190
4191 bucket_type_str = strsep(&str_ptr, "\t ");
4192 if (!bucket_type_str)
4193 return -EINVAL;
4194
4195 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4196 bucket_type = LPFC_LINEAR_BUCKET;
4197 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4198 bucket_type = LPFC_POWER2_BUCKET;
4199 else
4200 return -EINVAL;
4201
4202 base_str = strsep(&str_ptr, "\t ");
4203 if (!base_str)
4204 return -EINVAL;
4205 base = simple_strtoul(base_str, NULL, 0);
4206
4207 step_str = strsep(&str_ptr, "\t ");
4208 if (!step_str)
4209 return -EINVAL;
4210 step = simple_strtoul(step_str, NULL, 0);
4211 if (!step)
4212 return -EINVAL;
4213
4214 /* Block the data collection for every vport */
4215 vports = lpfc_create_vport_work_array(phba);
4216 if (vports == NULL)
4217 return -ENOMEM;
4218
4219 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4220 v_shost = lpfc_shost_from_vport(vports[i]);
4221 spin_lock_irq(v_shost->host_lock);
4222 /* Block and reset data collection */
4223 vports[i]->stat_data_blocked = 1;
4224 if (vports[i]->stat_data_enabled)
4225 lpfc_vport_reset_stat_data(vports[i]);
4226 spin_unlock_irq(v_shost->host_lock);
4227 }
4228
4229 /* Set the bucket attributes */
4230 phba->bucket_type = bucket_type;
4231 phba->bucket_base = base;
4232 phba->bucket_step = step;
4233
4234 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4235 v_shost = lpfc_shost_from_vport(vports[i]);
4236
4237 /* Unblock data collection */
4238 spin_lock_irq(v_shost->host_lock);
4239 vports[i]->stat_data_blocked = 0;
4240 spin_unlock_irq(v_shost->host_lock);
4241 }
4242 lpfc_destroy_vport_work_array(phba, vports);
4243 return strlen(buf);
4244 }
4245
4246 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4247 vports = lpfc_create_vport_work_array(phba);
4248 if (vports == NULL)
4249 return -ENOMEM;
4250
4251 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4252 v_shost = lpfc_shost_from_vport(vports[i]);
4253 spin_lock_irq(v_shost->host_lock);
4254 vports[i]->stat_data_blocked = 1;
4255 lpfc_free_bucket(vports[i]);
4256 vports[i]->stat_data_enabled = 0;
4257 vports[i]->stat_data_blocked = 0;
4258 spin_unlock_irq(v_shost->host_lock);
4259 }
4260 lpfc_destroy_vport_work_array(phba, vports);
4261 phba->bucket_type = LPFC_NO_BUCKET;
4262 phba->bucket_base = 0;
4263 phba->bucket_step = 0;
4264 return strlen(buf);
4265 }
4266
4267 if (!strncmp(buf, "start", strlen("start"))) {
4268 /* If no buckets configured return error */
4269 if (phba->bucket_type == LPFC_NO_BUCKET)
4270 return -EINVAL;
4271 spin_lock_irq(shost->host_lock);
4272 if (vport->stat_data_enabled) {
4273 spin_unlock_irq(shost->host_lock);
4274 return strlen(buf);
4275 }
4276 lpfc_alloc_bucket(vport);
4277 vport->stat_data_enabled = 1;
4278 spin_unlock_irq(shost->host_lock);
4279 return strlen(buf);
4280 }
4281
4282 if (!strncmp(buf, "stop", strlen("stop"))) {
4283 spin_lock_irq(shost->host_lock);
4284 if (vport->stat_data_enabled == 0) {
4285 spin_unlock_irq(shost->host_lock);
4286 return strlen(buf);
4287 }
4288 lpfc_free_bucket(vport);
4289 vport->stat_data_enabled = 0;
4290 spin_unlock_irq(shost->host_lock);
4291 return strlen(buf);
4292 }
4293
4294 if (!strncmp(buf, "reset", strlen("reset"))) {
4295 if ((phba->bucket_type == LPFC_NO_BUCKET)
4296 || !vport->stat_data_enabled)
4297 return strlen(buf);
4298 spin_lock_irq(shost->host_lock);
4299 vport->stat_data_blocked = 1;
4300 lpfc_vport_reset_stat_data(vport);
4301 vport->stat_data_blocked = 0;
4302 spin_unlock_irq(shost->host_lock);
4303 return strlen(buf);
4304 }
4305 return -EINVAL;
4306 }
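
/*
 * Usage sketch (host number and bucket values hypothetical): define linear
 * latency buckets with base 100 and step 200, start collection, and later
 * reset the counters:
 *
 *   echo "setbucket linear 100 200" > /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 *   echo "start" > /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 *   echo "reset" > /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 */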
4307
4308
4309 /**
4310 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4311 * @dev: Pointer to class device object.
4312 * @buf: Data buffer.
4313 *
4314 * This function is the read callback function for the
4315 * lpfc_stat_data_ctrl sysfs file. It reports the
4316 * current statistical data collection state.
4317 **/
4318 static ssize_t
4319 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4320 char *buf)
4321 {
4322 struct Scsi_Host *shost = class_to_shost(dev);
4323 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4324 struct lpfc_hba *phba = vport->phba;
4325 int index = 0;
4326 int i;
4327 char *bucket_type;
4328 unsigned long bucket_value;
4329
4330 switch (phba->bucket_type) {
4331 case LPFC_LINEAR_BUCKET:
4332 bucket_type = "linear";
4333 break;
4334 case LPFC_POWER2_BUCKET:
4335 bucket_type = "power2";
4336 break;
4337 default:
4338 bucket_type = "No Bucket";
4339 break;
4340 }
4341
4342 sprintf(&buf[index], "Statistical Data enabled :%d, "
4343 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4344 " Bucket step :%d\nLatency Ranges :",
4345 vport->stat_data_enabled, vport->stat_data_blocked,
4346 bucket_type, phba->bucket_base, phba->bucket_step);
4347 index = strlen(buf);
4348 if (phba->bucket_type != LPFC_NO_BUCKET) {
4349 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4350 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4351 bucket_value = phba->bucket_base +
4352 phba->bucket_step * i;
4353 else
4354 bucket_value = phba->bucket_base +
4355 (1 << i) * phba->bucket_step;
4356
4357 if (index + 10 > PAGE_SIZE)
4358 break;
4359 sprintf(&buf[index], "%08ld ", bucket_value);
4360 index = strlen(buf);
4361 }
4362 }
4363 sprintf(&buf[index], "\n");
4364 return strlen(buf);
4365 }
4366
4367 /*
4368 * Sysfs attribute to control the statistical data collection.
4369 */
4370 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
4371
4372 /*
4373 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4374 */
4375
4376 /*
4377 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
4378 * for each target.
4379 */
4380 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4381 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4382 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4383
4384
4385 /**
4386 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4387 * @filp: sysfs file
4388 * @kobj: Pointer to the kernel object
4389 * @bin_attr: Attribute object
4390 * @buf: Buffer pointer
4391 * @off: File offset
4392 * @count: Buffer size
4393 *
4394 * This function is the read callback function for the lpfc_drvr_stat_data
4395 * sysfs file. It exports the statistical data to user
4396 * applications.
4397 **/
4398 static ssize_t
4399 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4400 struct bin_attribute *bin_attr,
4401 char *buf, loff_t off, size_t count)
4402 {
4403 struct device *dev = container_of(kobj, struct device,
4404 kobj);
4405 struct Scsi_Host *shost = class_to_shost(dev);
4406 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4407 struct lpfc_hba *phba = vport->phba;
4408 int i = 0, index = 0;
4409 unsigned long nport_index;
4410 struct lpfc_nodelist *ndlp = NULL;
4411 nport_index = (unsigned long)off /
4412 MAX_STAT_DATA_SIZE_PER_TARGET;
4413
4414 if (!vport->stat_data_enabled || vport->stat_data_blocked
4415 || (phba->bucket_type == LPFC_NO_BUCKET))
4416 return 0;
4417
4418 spin_lock_irq(shost->host_lock);
4419 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4420 if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
4421 continue;
4422
4423 if (nport_index > 0) {
4424 nport_index--;
4425 continue;
4426 }
4427
4428 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4429 > count)
4430 break;
4431
4432 if (!ndlp->lat_data)
4433 continue;
4434
4435 /* Print the WWN */
4436 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4437 ndlp->nlp_portname.u.wwn[0],
4438 ndlp->nlp_portname.u.wwn[1],
4439 ndlp->nlp_portname.u.wwn[2],
4440 ndlp->nlp_portname.u.wwn[3],
4441 ndlp->nlp_portname.u.wwn[4],
4442 ndlp->nlp_portname.u.wwn[5],
4443 ndlp->nlp_portname.u.wwn[6],
4444 ndlp->nlp_portname.u.wwn[7]);
4445
4446 index = strlen(buf);
4447
4448 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4449 sprintf(&buf[index], "%010u,",
4450 ndlp->lat_data[i].cmd_count);
4451 index = strlen(buf);
4452 }
4453 sprintf(&buf[index], "\n");
4454 index = strlen(buf);
4455 }
4456 spin_unlock_irq(shost->host_lock);
4457 return index;
4458 }
4459
4460 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4461 .attr = {
4462 .name = "lpfc_drvr_stat_data",
4463 .mode = S_IRUSR,
4464 },
4465 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4466 .read = sysfs_drvr_stat_data_read,
4467 .write = NULL,
4468 };
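
/*
 * Each record returned by sysfs_drvr_stat_data_read is a target WWN followed
 * by one zero-padded %010u command count per latency bucket; the values below
 * are hypothetical:
 *
 *   10000090fa942779:0000001234,0000000056,0000000003, ... <one count per bucket>
 */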
4469
4470 /*
4471 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4472 # connection.
4473 # Supported values are 0 (auto select), 1, 2, 4, 8, 16, 32 and 64. Default value is 0.
4474 */
4475 /**
4476 * lpfc_link_speed_set - Set the adapters link speed
4477 * @phba: lpfc_hba pointer.
4478 * @val: link speed value.
4479 *
4480 * Description:
4481 * If val is in a valid range then set the adapter's link speed field and
4482 * issue a lip; if the lip fails reset the link speed to the old value.
4483 *
4484 * Notes:
4485 * If the value is not in range log a kernel error message and return an error.
4486 *
4487 * Returns:
4488 * zero if val is in range and lip okay.
4489 * non-zero return value from lpfc_issue_lip()
4490 * -EINVAL val out of range
4491 **/
4492 static ssize_t
4493 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4494 const char *buf, size_t count)
4495 {
4496 struct Scsi_Host *shost = class_to_shost(dev);
4497 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4498 struct lpfc_hba *phba = vport->phba;
4499 int val = LPFC_USER_LINK_SPEED_AUTO;
4500 int nolip = 0;
4501 const char *val_buf = buf;
4502 int err;
4503 uint32_t prev_val, if_type;
4504
4505 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4506 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4507 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4508 return -EPERM;
4509
4510 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4511 nolip = 1;
4512 val_buf = &buf[strlen("nolip ")];
4513 }
4514
4515 if (!isdigit(val_buf[0]))
4516 return -EINVAL;
4517 if (sscanf(val_buf, "%i", &val) != 1)
4518 return -EINVAL;
4519
4520 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4521 "3055 lpfc_link_speed changed from %d to %d %s\n",
4522 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4523
4524 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4525 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4526 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4527 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4528 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4529 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4530 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4531 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4533 "2879 lpfc_link_speed attribute cannot be set "
4534 "to %d. Speed is not supported by this port.\n",
4535 val);
4536 return -EINVAL;
4537 }
4538 if (val >= LPFC_USER_LINK_SPEED_16G &&
4539 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4541 "3112 lpfc_link_speed attribute cannot be set "
4542 "to %d. Speed is not supported in loop mode.\n",
4543 val);
4544 return -EINVAL;
4545 }
4546
4547 switch (val) {
4548 case LPFC_USER_LINK_SPEED_AUTO:
4549 case LPFC_USER_LINK_SPEED_1G:
4550 case LPFC_USER_LINK_SPEED_2G:
4551 case LPFC_USER_LINK_SPEED_4G:
4552 case LPFC_USER_LINK_SPEED_8G:
4553 case LPFC_USER_LINK_SPEED_16G:
4554 case LPFC_USER_LINK_SPEED_32G:
4555 case LPFC_USER_LINK_SPEED_64G:
4556 prev_val = phba->cfg_link_speed;
4557 phba->cfg_link_speed = val;
4558 if (nolip)
4559 return strlen(buf);
4560
4561 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4562 if (err) {
4563 phba->cfg_link_speed = prev_val;
4564 return -EINVAL;
4565 }
4566 return strlen(buf);
4567 default:
4568 break;
4569 }
4570
4571 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4572 "0469 lpfc_link_speed attribute cannot be set to %d, "
4573 "allowed values are [%s]\n",
4574 val, LPFC_LINK_SPEED_STRING);
4575 return -EINVAL;
4576
4577 }
4578
4579 static int lpfc_link_speed = 0;
4580 module_param(lpfc_link_speed, int, S_IRUGO);
4581 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4582 lpfc_param_show(link_speed)
4583
4584 /**
4585 * lpfc_link_speed_init - Set the adapters link speed
4586 * @phba: lpfc_hba pointer.
4587 * @val: link speed value.
4588 *
4589 * Description:
4590 * If val is in a valid range then set the adapter's link speed field.
4591 *
4592 * Notes:
4593 * If the value is not in range log a kernel error message, clear the link
4594 * speed and return an error.
4595 *
4596 * Returns:
4597 * zero if val saved.
4598 * -EINVAL val out of range
4599 **/
4600 static int
4601 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4602 {
4603 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4605 "3111 lpfc_link_speed of %d cannot "
4606 "support loop mode, setting topology to default.\n",
4607 val);
4608 phba->cfg_topology = 0;
4609 }
4610
4611 switch (val) {
4612 case LPFC_USER_LINK_SPEED_AUTO:
4613 case LPFC_USER_LINK_SPEED_1G:
4614 case LPFC_USER_LINK_SPEED_2G:
4615 case LPFC_USER_LINK_SPEED_4G:
4616 case LPFC_USER_LINK_SPEED_8G:
4617 case LPFC_USER_LINK_SPEED_16G:
4618 case LPFC_USER_LINK_SPEED_32G:
4619 case LPFC_USER_LINK_SPEED_64G:
4620 phba->cfg_link_speed = val;
4621 return 0;
4622 default:
4623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4624 "0405 lpfc_link_speed attribute cannot "
4625 "be set to %d, allowed values are "
4626 "["LPFC_LINK_SPEED_STRING"]\n", val);
4627 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4628 return -EINVAL;
4629 }
4630 }
4631
4632 static DEVICE_ATTR_RW(lpfc_link_speed);
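
/*
 * Usage sketch (host number hypothetical): the speed can be fixed at module
 * load time, or changed at runtime, optionally without a LIP via the
 * "nolip " prefix handled by the store routine:
 *
 *   modprobe lpfc lpfc_link_speed=32
 *   echo "nolip 16" > /sys/class/scsi_host/host0/lpfc_link_speed
 */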
4633
4634 /*
4635 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4636 # 0 = aer disabled or not supported
4637 # 1 = aer supported and enabled (default)
4638 # Value range is [0,1]. Default value is 1.
4639 */
4640 LPFC_ATTR(aer_support, 1, 0, 1,
4641 "Enable PCIe device AER support");
4642 lpfc_param_show(aer_support)
4643
4644 /**
4645 * lpfc_aer_support_store - Set the adapter for aer support
4646 *
4647 * @dev: class device that is converted into a Scsi_host.
4648 * @attr: device attribute, not used.
4649 * @buf: containing enable or disable aer flag.
4650 * @count: unused variable.
4651 *
4652 * Description:
4653 * If the val is 1 and currently the device's AER capability was not
4654 * enabled, invoke the kernel's enable AER helper routine, trying to
4655 * enable the device's AER capability. If the helper routine enabling
4656 * AER returns success, update the device's cfg_aer_support flag to
4657 * indicate AER is supported by the device; otherwise, if the device
4658 * AER capability is already enabled to support AER, then do nothing.
4659 *
4660 * If the val is 0 and currently the device's AER support was enabled,
4661 * invoke the kernel's disable AER helper routine. After that, update
4662 * the device's cfg_aer_support flag to indicate AER is not supported
4663 * by the device; otherwise, if the device AER capability is already
4664 * disabled from supporting AER, then do nothing.
4665 *
4666 * Returns:
4667 * length of the buf on success if val is in range and the intended mode
4668 * is supported.
4669 * -EINVAL if val out of range or intended mode is not supported.
4670 **/
4671 static ssize_t
4672 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4673 const char *buf, size_t count)
4674 {
4675 struct Scsi_Host *shost = class_to_shost(dev);
4676 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4677 struct lpfc_hba *phba = vport->phba;
4678 int val = 0, rc = -EINVAL;
4679
4680 if (!isdigit(buf[0]))
4681 return -EINVAL;
4682 if (sscanf(buf, "%i", &val) != 1)
4683 return -EINVAL;
4684
4685 switch (val) {
4686 case 0:
4687 if (phba->hba_flag & HBA_AER_ENABLED) {
4688 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4689 if (!rc) {
4690 spin_lock_irq(&phba->hbalock);
4691 phba->hba_flag &= ~HBA_AER_ENABLED;
4692 spin_unlock_irq(&phba->hbalock);
4693 phba->cfg_aer_support = 0;
4694 rc = strlen(buf);
4695 } else
4696 rc = -EPERM;
4697 } else {
4698 phba->cfg_aer_support = 0;
4699 rc = strlen(buf);
4700 }
4701 break;
4702 case 1:
4703 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4704 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4705 if (!rc) {
4706 spin_lock_irq(&phba->hbalock);
4707 phba->hba_flag |= HBA_AER_ENABLED;
4708 spin_unlock_irq(&phba->hbalock);
4709 phba->cfg_aer_support = 1;
4710 rc = strlen(buf);
4711 } else
4712 rc = -EPERM;
4713 } else {
4714 phba->cfg_aer_support = 1;
4715 rc = strlen(buf);
4716 }
4717 break;
4718 default:
4719 rc = -EINVAL;
4720 break;
4721 }
4722 return rc;
4723 }
4724
4725 static DEVICE_ATTR_RW(lpfc_aer_support);
4726
4727 /**
4728 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4729 * @dev: class device that is converted into a Scsi_host.
4730 * @attr: device attribute, not used.
4731 * @buf: containing flag 1 for aer cleanup state.
4732 * @count: unused variable.
4733 *
4734 * Description:
4735 * If the @buf contains 1 and the device currently has the AER support
4736 * enabled, then invokes the kernel AER helper routine
4737 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4738 * error status register.
4739 *
4740 * Notes:
4741 *
4742 * Returns:
4743 * -EINVAL if the buf does not contain the 1 or the device is not currently
4744 * enabled with the AER support.
4745 **/
4746 static ssize_t
4747 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4748 const char *buf, size_t count)
4749 {
4750 struct Scsi_Host *shost = class_to_shost(dev);
4751 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4752 struct lpfc_hba *phba = vport->phba;
4753 int val, rc = -1;
4754
4755 if (!isdigit(buf[0]))
4756 return -EINVAL;
4757 if (sscanf(buf, "%i", &val) != 1)
4758 return -EINVAL;
4759 if (val != 1)
4760 return -EINVAL;
4761
4762 if (phba->hba_flag & HBA_AER_ENABLED)
4763 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4764
4765 if (rc == 0)
4766 return strlen(buf);
4767 else
4768 return -EPERM;
4769 }
4770
4771 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4772 lpfc_aer_cleanup_state);
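
/*
 * Usage sketch (host number hypothetical): disable and re-enable AER on the
 * adapter, then clear any latched non-fatal AER status:
 *
 *   echo 0 > /sys/class/scsi_host/host0/lpfc_aer_support
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_support
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup
 */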
4773
4774 /**
4775 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4776 *
4777 * @dev: class device that is converted into a Scsi_host.
4778 * @attr: device attribute, not used.
4779 * @buf: containing the string the number of vfs to be enabled.
4780 * @count: unused variable.
4781 *
4782 * Description:
4783 * When this API is called through sysfs, the driver shall
4784 * try to enable or disable SR-IOV virtual functions according to the
4785 * following:
4786 *
4787 * If no virtual functions are currently enabled on the physical function,
4788 * the driver shall invoke the PCI enable virtual function API trying
4789 * to enable the virtual functions. If the nr_vfn provided is greater
4790 * than the maximum supported, -EINVAL will be returned and the driver
4791 * does nothing; otherwise, the nr_vfn provided shall be used for
4792 * invoking the API. If the API call returns success, the actual number
4793 * of virtual functions enabled will be set to the driver's
4794 * cfg_sriov_nr_virtfn; otherwise, -EPERM shall be returned and the
4795 * driver's cfg_sriov_nr_virtfn remains zero.
4796 *
4797 * If non-zero virtual functions have already been enabled on the
4798 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4799 * -EEXIST will be returned and the driver does nothing.
4800 *
4801 * If the nr_vfn provided is zero and non-zero virtual functions have
4802 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4803 * disabling virtual function API shall be invoked to disable all the
4804 * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set
4805 * to zero. Otherwise, if zero virtual functions have been enabled, do
4806 * nothing.
4807 *
4808 * Returns:
4809 * length of the buf on success if val is in range and the intended mode
4810 * is supported.
4811 * -EINVAL, -EEXIST or -EPERM if val is out of range or the request cannot be honored.
4812 **/
4813 static ssize_t
4814 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4815 const char *buf, size_t count)
4816 {
4817 struct Scsi_Host *shost = class_to_shost(dev);
4818 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4819 struct lpfc_hba *phba = vport->phba;
4820 struct pci_dev *pdev = phba->pcidev;
4821 int val = 0, rc = -EINVAL;
4822
4823 /* Sanity check on user data */
4824 if (!isdigit(buf[0]))
4825 return -EINVAL;
4826 if (sscanf(buf, "%i", &val) != 1)
4827 return -EINVAL;
4828 if (val < 0)
4829 return -EINVAL;
4830
4831 /* Request disabling virtual functions */
4832 if (val == 0) {
4833 if (phba->cfg_sriov_nr_virtfn > 0) {
4834 pci_disable_sriov(pdev);
4835 phba->cfg_sriov_nr_virtfn = 0;
4836 }
4837 return strlen(buf);
4838 }
4839
4840 /* Request enabling virtual functions */
4841 if (phba->cfg_sriov_nr_virtfn > 0) {
4842 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4843 "3018 There are %d virtual functions "
4844 "enabled on physical function.\n",
4845 phba->cfg_sriov_nr_virtfn);
4846 return -EEXIST;
4847 }
4848
4849 if (val <= LPFC_MAX_VFN_PER_PFN)
4850 phba->cfg_sriov_nr_virtfn = val;
4851 else {
4852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4853 "3019 Enabling %d virtual functions is not "
4854 "allowed.\n", val);
4855 return -EINVAL;
4856 }
4857
4858 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4859 if (rc) {
4860 phba->cfg_sriov_nr_virtfn = 0;
4861 rc = -EPERM;
4862 } else
4863 rc = strlen(buf);
4864
4865 return rc;
4866 }
4867
4868 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4869 "Enable PCIe device SR-IOV virtual fn");
4870
4871 lpfc_param_show(sriov_nr_virtfn)
4872 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
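
/*
 * Usage sketch (host number hypothetical): enable four virtual functions and
 * later disable them again; a non-zero write fails with -EEXIST while VFs are
 * already enabled:
 *
 *   echo 4 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
 *   echo 0 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
 */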
4873
4874 /**
4875 * lpfc_request_firmware_store - Request for Linux generic firmware upgrade
4876 *
4877 * @dev: class device that is converted into a Scsi_host.
4878 * @attr: device attribute, not used.
4879 * @buf: containing the string "1" to request a firmware upgrade.
4880 * @count: unused variable.
4881 *
4882 * Description:
4883 * Requests that the driver initiate a Linux generic firmware upgrade on the adapter.
4884 * Returns:
4885 * length of the buf on success if val is in range and the intended mode
4886 * is supported.
4887 * -EINVAL if val out of range or intended mode is not supported.
4888 **/
4889 static ssize_t
4890 lpfc_request_firmware_upgrade_store(struct device *dev,
4891 struct device_attribute *attr,
4892 const char *buf, size_t count)
4893 {
4894 struct Scsi_Host *shost = class_to_shost(dev);
4895 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4896 struct lpfc_hba *phba = vport->phba;
4897 int val = 0, rc;
4898
4899 /* Sanity check on user data */
4900 if (!isdigit(buf[0]))
4901 return -EINVAL;
4902 if (sscanf(buf, "%i", &val) != 1)
4903 return -EINVAL;
4904 if (val != 1)
4905 return -EINVAL;
4906
4907 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4908 if (rc)
4909 rc = -EPERM;
4910 else
4911 rc = strlen(buf);
4912 return rc;
4913 }
4914
4915 static int lpfc_req_fw_upgrade;
4916 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4917 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4918 lpfc_param_show(request_firmware_upgrade)
4919
4920 /**
4921 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4922 * @phba: lpfc_hba pointer.
4923 * @val: 0 or 1.
4924 *
4925 * Description:
4926 * Set the initial Linux generic firmware upgrade enable or disable flag.
4927 *
4928 * Returns:
4929 * zero if val saved.
4930 * -EINVAL val out of range
4931 **/
4932 static int
4933 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4934 {
4935 if (val >= 0 && val <= 1) {
4936 phba->cfg_request_firmware_upgrade = val;
4937 return 0;
4938 }
4939 return -EINVAL;
4940 }
4941 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4942 lpfc_request_firmware_upgrade_show,
4943 lpfc_request_firmware_upgrade_store);
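
/*
 * Illustrative usage (hypothetical host number): writing "1" requests a
 * firmware upgrade through the Linux generic firmware interface; any
 * other value is rejected with -EINVAL.
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_req_fw_upgrade
 */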
4944
4945 /**
 * lpfc_force_rscn_store - Force an RSCN to be sent to the fabric
4947 *
4948 * @dev: class device that is converted into a Scsi_host.
4949 * @attr: device attribute, not used.
4950 * @buf: unused string
4951 * @count: unused variable.
4952 *
4953 * Description:
 * Force the switch to send an RSCN to all other NPorts in our zone.
 * If we are direct connect pt2pt, build the RSCN command ourselves
 * and send it to the other NPort. Not supported for private loop.
4957 *
4958 * Returns:
 * length of the buf - on success
4960 * -EIO - if command is not sent
4961 **/
4962 static ssize_t
lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4964 const char *buf, size_t count)
4965 {
4966 struct Scsi_Host *shost = class_to_shost(dev);
4967 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4968 int i;
4969
4970 i = lpfc_issue_els_rscn(vport, 0);
4971 if (i)
4972 return -EIO;
4973 return strlen(buf);
4974 }
4975
4976 /*
4977 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4978 * connected to the HBA.
4979 *
4980 * Value range is any ascii value
4981 */
4982 static int lpfc_force_rscn;
4983 module_param(lpfc_force_rscn, int, 0644);
4984 MODULE_PARM_DESC(lpfc_force_rscn,
4985 "Force an RSCN to be sent to all remote NPorts");
lpfc_param_show(force_rscn)
4987
4988 /**
4989 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4990 * @phba: lpfc_hba pointer.
4991 * @val: unused value.
4992 *
4993 * Returns:
4994 * zero if val saved.
4995 **/
4996 static int
4997 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4998 {
4999 return 0;
5000 }
5001 static DEVICE_ATTR_RW(lpfc_force_rscn);
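
/*
 * Illustrative usage (hypothetical host number): any integer write
 * triggers the RSCN; the value itself is not interpreted.
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_force_rscn
 */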
5002
5003 /**
 * lpfc_fcp_imax_store - Set the maximum fast-path FCP interrupt rate
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: string with the number of fast-path FCP interrupts per second.
 * @count: unused variable.
 *
 * Description:
 * If val is in the valid range [5000,5000000], then set the adapter's
 * maximum number of fast-path FCP interrupts per second. A value of 0
 * re-enables automatic EQ delay management.
 *
 * Returns:
 * length of the buf on success if val is in range and the intended mode
 * is supported.
 * -EINVAL if val is out of range or the intended mode is not supported.
5019 **/
5020 static ssize_t
lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5022 const char *buf, size_t count)
5023 {
5024 struct Scsi_Host *shost = class_to_shost(dev);
5025 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5026 struct lpfc_hba *phba = vport->phba;
5027 struct lpfc_eq_intr_info *eqi;
5028 uint32_t usdelay;
5029 int val = 0, i;
5030
5031 /* fcp_imax is only valid for SLI4 */
5032 if (phba->sli_rev != LPFC_SLI_REV4)
5033 return -EINVAL;
5034
5035 /* Sanity check on user data */
5036 if (!isdigit(buf[0]))
5037 return -EINVAL;
5038 if (sscanf(buf, "%i", &val) != 1)
5039 return -EINVAL;
5040
5041 /*
5042 * Value range for the HBA is [5000,5000000]
5043 * The value for each EQ depends on how many EQs are configured.
5044 * Allow value == 0
5045 */
5046 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5047 return -EINVAL;
5048
5049 phba->cfg_auto_imax = (val) ? 0 : 1;
5050 if (phba->cfg_fcp_imax && !val) {
5051 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5052 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5053
5054 for_each_present_cpu(i) {
5055 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5056 eqi->icnt = 0;
5057 }
5058 }
5059
5060 phba->cfg_fcp_imax = (uint32_t)val;
5061
5062 if (phba->cfg_fcp_imax)
5063 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5064 else
5065 usdelay = 0;
5066
5067 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5068 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5069 usdelay);
5070
5071 return strlen(buf);
5072 }
5073
5074 /*
5075 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5076 # for the HBA.
5077 #
5078 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5079 */
5080 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5081 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5082 MODULE_PARM_DESC(lpfc_fcp_imax,
5083 "Set the maximum number of FCP interrupts per second per HBA");
lpfc_param_show(fcp_imax)
5085
5086 /**
 * lpfc_fcp_imax_init - Set the initial fast-path FCP interrupt rate
 * @phba: lpfc_hba pointer.
 * @val: maximum number of fast-path FCP interrupts per second.
 *
 * Description:
 * If val is in the valid range [5000,5000000] (or 0), then initialize
 * the adapter's maximum number of fast-path FCP interrupts per second.
 * Out-of-range values fall back to the default.
 *
 * Returns:
 * zero; out-of-range values are replaced with the default.
5098 **/
5099 static int
5100 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5101 {
5102 if (phba->sli_rev != LPFC_SLI_REV4) {
5103 phba->cfg_fcp_imax = 0;
5104 return 0;
5105 }
5106
5107 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5108 (val == 0)) {
5109 phba->cfg_fcp_imax = val;
5110 return 0;
5111 }
5112
5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5114 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5115 val);
5116 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5117
5118 return 0;
5119 }
5120
5121 static DEVICE_ATTR_RW(lpfc_fcp_imax);
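
/*
 * Illustrative usage (hypothetical host number):
 *   echo 50000 > /sys/class/scsi_host/host0/lpfc_fcp_imax   # fixed interrupt rate
 *   echo 0     > /sys/class/scsi_host/host0/lpfc_fcp_imax   # automatic EQ delay
 * Nonzero values outside [LPFC_MIN_IMAX, LPFC_MAX_IMAX] are rejected
 * with -EINVAL.
 */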
5122
5123 /**
 * lpfc_cq_max_proc_limit_store - Set the CQ max processing limit
 *
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: string with the maximum number of CQEs to process per CQ.
 * @count: unused variable.
 *
 * Description:
 * If val is in a valid range, then set the value on each cq
5133 *
5134 * Returns:
5135 * The length of the buf: if successful
5136 * -ERANGE: if val is not in the valid range
5137 * -EINVAL: if bad value format or intended mode is not supported.
5138 **/
5139 static ssize_t
lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5141 const char *buf, size_t count)
5142 {
5143 struct Scsi_Host *shost = class_to_shost(dev);
5144 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5145 struct lpfc_hba *phba = vport->phba;
5146 struct lpfc_queue *eq, *cq;
5147 unsigned long val;
5148 int i;
5149
5150 /* cq_max_proc_limit is only valid for SLI4 */
5151 if (phba->sli_rev != LPFC_SLI_REV4)
5152 return -EINVAL;
5153
5154 /* Sanity check on user data */
5155 if (!isdigit(buf[0]))
5156 return -EINVAL;
5157 if (kstrtoul(buf, 0, &val))
5158 return -EINVAL;
5159
5160 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5161 return -ERANGE;
5162
5163 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5164
5165 /* set the values on the cq's */
5166 for (i = 0; i < phba->cfg_irq_chann; i++) {
5167 /* Get the EQ corresponding to the IRQ vector */
5168 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5169 if (!eq)
5170 continue;
5171
5172 list_for_each_entry(cq, &eq->child_list, list)
5173 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5174 cq->entry_count);
5175 }
5176
5177 return strlen(buf);
5178 }
5179
5180 /*
 * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
 * iteration of CQ processing.
5183 */
5184 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5185 module_param(lpfc_cq_max_proc_limit, int, 0644);
5186 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
	    "Set the maximum number of CQEs processed in an iteration of "
5188 "CQ processing");
5189 lpfc_param_show(cq_max_proc_limit)
5190
5191 /*
5192 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5193 * single handler call which should request a polled completion rather
5194 * than re-enabling interrupts.
5195 */
5196 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5197 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5198 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5199 "CQE Processing Threshold to enable Polling");
5200
5201 /**
5202 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5203 * @phba: lpfc_hba pointer.
5204 * @val: entry limit
5205 *
5206 * Description:
 * If val is in a valid range, then initialize the adapter's maximum
 * CQ processing limit to that value.
 *
 * Returns:
 * Always returns 0, even if the value was not applied. If the value is
 * out of range or not supported, the driver falls back to the default.
5214 **/
5215 static int
lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5217 {
5218 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5219
5220 if (phba->sli_rev != LPFC_SLI_REV4)
5221 return 0;
5222
5223 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5224 phba->cfg_cq_max_proc_limit = val;
5225 return 0;
5226 }
5227
5228 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5229 "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
5230 "%d out of range, using default\n",
			val);
5232
5233 return 0;
5234 }
5235
5236 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
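
/*
 * Illustrative usage (hypothetical host number and value): the written
 * limit is applied to every CQ, capped at each CQ's entry count; values
 * outside [LPFC_CQ_MIN_PROC_LIMIT, LPFC_CQ_MAX_PROC_LIMIT] return -ERANGE.
 *   echo 256 > /sys/class/scsi_host/host0/lpfc_cq_max_proc_limit
 */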
5237
5238 /**
 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains text describing the CPU to IRQ vector mapping.
5243 *
5244 * Returns: size of formatted string.
5245 **/
5246 static ssize_t
lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5248 char *buf)
5249 {
5250 struct Scsi_Host *shost = class_to_shost(dev);
5251 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5252 struct lpfc_hba *phba = vport->phba;
5253 struct lpfc_vector_map_info *cpup;
5254 int len = 0;
5255
5256 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5257 (phba->intr_type != MSIX))
5258 return len;
5259
5260 switch (phba->cfg_fcp_cpu_map) {
5261 case 0:
5262 len += scnprintf(buf + len, PAGE_SIZE-len,
5263 "fcp_cpu_map: No mapping (%d)\n",
5264 phba->cfg_fcp_cpu_map);
5265 return len;
5266 case 1:
5267 len += scnprintf(buf + len, PAGE_SIZE-len,
5268 "fcp_cpu_map: HBA centric mapping (%d): "
5269 "%d of %d CPUs online from %d possible CPUs\n",
5270 phba->cfg_fcp_cpu_map, num_online_cpus(),
5271 num_present_cpus(),
5272 phba->sli4_hba.num_possible_cpu);
5273 break;
5274 }
5275
5276 while (phba->sli4_hba.curr_disp_cpu <
5277 phba->sli4_hba.num_possible_cpu) {
5278 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5279
5280 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5281 len += scnprintf(buf + len, PAGE_SIZE - len,
5282 "CPU %02d not present\n",
5283 phba->sli4_hba.curr_disp_cpu);
5284 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5285 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5286 len += scnprintf(
5287 buf + len, PAGE_SIZE - len,
5288 "CPU %02d hdwq None "
5289 "physid %d coreid %d ht %d ua %d\n",
5290 phba->sli4_hba.curr_disp_cpu,
5291 cpup->phys_id, cpup->core_id,
5292 (cpup->flag & LPFC_CPU_MAP_HYPER),
5293 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5294 else
5295 len += scnprintf(
5296 buf + len, PAGE_SIZE - len,
5297 "CPU %02d EQ None hdwq %04d "
5298 "physid %d coreid %d ht %d ua %d\n",
5299 phba->sli4_hba.curr_disp_cpu,
5300 cpup->hdwq, cpup->phys_id,
5301 cpup->core_id,
5302 (cpup->flag & LPFC_CPU_MAP_HYPER),
5303 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5304 } else {
5305 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5306 len += scnprintf(
5307 buf + len, PAGE_SIZE - len,
5308 "CPU %02d hdwq None "
5309 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5310 phba->sli4_hba.curr_disp_cpu,
5311 cpup->phys_id,
5312 cpup->core_id,
5313 (cpup->flag & LPFC_CPU_MAP_HYPER),
5314 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5315 lpfc_get_irq(cpup->eq));
5316 else
5317 len += scnprintf(
5318 buf + len, PAGE_SIZE - len,
5319 "CPU %02d EQ %04d hdwq %04d "
5320 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5321 phba->sli4_hba.curr_disp_cpu,
5322 cpup->eq, cpup->hdwq, cpup->phys_id,
5323 cpup->core_id,
5324 (cpup->flag & LPFC_CPU_MAP_HYPER),
5325 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5326 lpfc_get_irq(cpup->eq));
5327 }
5328
5329 phba->sli4_hba.curr_disp_cpu++;
5330
5331 /* display max number of CPUs keeping some margin */
5332 if (phba->sli4_hba.curr_disp_cpu <
5333 phba->sli4_hba.num_possible_cpu &&
5334 (len >= (PAGE_SIZE - 64))) {
5335 len += scnprintf(buf + len,
5336 PAGE_SIZE - len, "more...\n");
5337 break;
5338 }
5339 }
5340
5341 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5342 phba->sli4_hba.curr_disp_cpu = 0;
5343
5344 return len;
5345 }
5346
5347 /**
5348 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5349 * @dev: class device that is converted into a Scsi_host.
5350 * @attr: device attribute, not used.
 * @buf: unused; changing the mapping at run time is not supported.
5352 * @count: not used.
5353 *
5354 * Returns:
5355 * -EINVAL - Not implemented yet.
5356 **/
5357 static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5359 const char *buf, size_t count)
5360 {
5361 return -EINVAL;
5362 }
5363
5364 /*
5365 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5366 # for the HBA.
5367 #
5368 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
# 0 - Do not affinitize IRQ vectors
# 1 - Affinitize HBA vectors with respect to each HBA
5371 # (start with CPU0 for each HBA)
5372 # This also defines how Hardware Queues are mapped to specific CPUs.
5373 */
5374 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5375 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5376 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5377 "Defines how to map CPUs to IRQ vectors per HBA");
5378
5379 /**
 * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
 * @phba: lpfc_hba pointer.
 * @val: CPU mapping mode.
 *
 * Description:
 * If val is in the valid range [0,1], then affinitize the adapter's
 * MSI-X vectors accordingly; out-of-range values fall back to the
 * default mapping.
 *
 * Returns:
 * zero; out-of-range values are replaced with the default.
5391 **/
5392 static int
lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5394 {
5395 if (phba->sli_rev != LPFC_SLI_REV4) {
5396 phba->cfg_fcp_cpu_map = 0;
5397 return 0;
5398 }
5399
5400 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5401 phba->cfg_fcp_cpu_map = val;
5402 return 0;
5403 }
5404
5405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5406 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5407 "default\n", val);
5408 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5409
5410 return 0;
5411 }
5412
5413 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
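
/*
 * Illustrative usage (hypothetical host number): the mapping is
 * read-only at run time; reads dump the per-CPU EQ/hdwq assignment and
 * writes return -EINVAL.
 *   cat /sys/class/scsi_host/host0/lpfc_fcp_cpu_map
 */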
5414
5415 /*
5416 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5417 # Value range is [2,3]. Default value is 3.
5418 */
5419 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5420 "Select Fibre Channel class of service for FCP sequences");
5421
5422 /*
5423 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5424 # is [0,1]. Default value is 0.
5425 */
5426 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5427 "Use ADISC on rediscovery to authenticate FCP devices");
5428
5429 /*
5430 # lpfc_first_burst_size: First burst size to use on the NPorts
5431 # that support first burst.
5432 # Value range is [0,65536]. Default value is 0.
5433 */
5434 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5435 "First burst size for Targets that support first burst");
5436
5437 /*
5438 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5439 * When the driver is configured as an NVME target, this value is
5440 * communicated to the NVME initiator in the PRLI response. It is
5441 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5442 * parameters are set and the target is sending the PRLI RSP.
5443 * Parameter supported on physical port only - no NPIV support.
5444 * Value range is [0,65536]. Default value is 0.
5445 */
5446 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5447 "NVME Target mode first burst size in 512B increments.");
5448
5449 /*
5450 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5451 * For the Initiator (I), enabling this parameter means that an NVMET
5452 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5453 * processed by the initiator for subsequent NVME FCP IO.
5454 * Currently, this feature is not supported on the NVME target
5455 * Value range is [0,1]. Default value is 0 (disabled).
5456 */
5457 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5458 "Enable First Burst feature for NVME Initiator.");
5459
5460 /*
5461 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5462 # depth. Default value is 0. When the value of this parameter is zero the
5463 # SCSI command completion time is not used for controlling I/O queue depth. When
5464 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5465 # to limit the I/O completion time to the parameter value.
5466 # The value is set in milliseconds.
5467 */
5468 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5469 "Use command completion time to control queue depth");
5470
5471 lpfc_vport_param_show(max_scsicmpl_time);
5472 static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5474 {
5475 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5476 struct lpfc_nodelist *ndlp, *next_ndlp;
5477
5478 if (val == vport->cfg_max_scsicmpl_time)
5479 return 0;
5480 if ((val < 0) || (val > 60000))
5481 return -EINVAL;
5482 vport->cfg_max_scsicmpl_time = val;
5483
5484 spin_lock_irq(shost->host_lock);
5485 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5486 if (!NLP_CHK_NODE_ACT(ndlp))
5487 continue;
5488 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5489 continue;
5490 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5491 }
5492 spin_unlock_irq(shost->host_lock);
5493 return 0;
5494 }
5495 lpfc_vport_param_store(max_scsicmpl_time);
5496 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
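
/*
 * Illustrative usage (hypothetical host number and value): the limit is
 * given in milliseconds, range [0,60000]; 0 disables completion-time
 * based queue depth control.
 *   echo 500 > /sys/class/scsi_host/host0/lpfc_max_scsicmpl_time
 */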
5497
5498 /*
5499 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5500 # range is [0,1]. Default value is 0.
5501 */
5502 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5503
5504 /*
5505 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5506 # range is [0,1]. Default value is 1.
5507 */
5508 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5509
5510 /*
 * lpfc_fcp_io_sched: Determine the scheduling algorithm for issuing FCP cmds
5512 * range is [0,1]. Default value is 0.
5513 * For [0], FCP commands are issued to Work Queues based on upper layer
5514 * hardware queue index.
5515 * For [1], FCP commands are issued to a Work Queue associated with the
5516 * current CPU.
5517 *
5518 * LPFC_FCP_SCHED_BY_HDWQ == 0
5519 * LPFC_FCP_SCHED_BY_CPU == 1
5520 *
5521 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5522 * affinity for FCP/NVME I/Os through Work Queues associated with the current
 * CPU. Otherwise, the default 0 (by hardware queue index) scheduling of
 * FCP/NVME I/Os through WQs will be used.
5525 */
5526 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5527 LPFC_FCP_SCHED_BY_HDWQ,
5528 LPFC_FCP_SCHED_BY_CPU,
5529 "Determine scheduling algorithm for "
5530 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5531
5532 /*
 * lpfc_ns_query: Determine the algorithm for NameServer queries after RSCN
5534 * range is [0,1]. Default value is 0.
5535 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5536 * For [1], GID_PT is used for NameServer queries after RSCN
5537 *
5538 */
5539 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5540 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
	     "Determine algorithm for NameServer queries after RSCN "
5542 "[0] - GID_FT, [1] - GID_PT");
5543
5544 /*
5545 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5546 # range is [0,1]. Default value is 0.
5547 # For [0], bus reset issues target reset to ALL devices
5548 # For [1], bus reset issues target reset to non-FCP2 devices
5549 */
5550 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5551 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5552
5553
5554 /*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5556 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5557 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5558 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5559 # cr_delay is set to 0.
5560 */
5561 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5562 "interrupt response is generated");
5563
5564 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5565 "interrupt response is generated");
5566
5567 /*
5568 # lpfc_multi_ring_support: Determines how many rings to spread available
5569 # cmd/rsp IOCB entries across.
5570 # Value range is [1,2]. Default value is 1.
5571 */
5572 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5573 "SLI rings to spread IOCB entries across");
5574
5575 /*
5576 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5577 # identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5579 */
5580 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5581 255, "Identifies RCTL for additional ring configuration");
5582
5583 /*
5584 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5585 # identifies what type value to configure the additional ring for.
5586 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5587 */
5588 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5589 255, "Identifies TYPE for additional ring configuration");
5590
5591 /*
5592 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5593 # 0 = SmartSAN functionality disabled (default)
5594 # 1 = SmartSAN functionality enabled
5595 # This parameter will override the value of lpfc_fdmi_on module parameter.
5596 # Value range is [0,1]. Default value is 0.
5597 */
5598 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5599
5600 /*
5601 # lpfc_fdmi_on: Controls FDMI support.
5602 # 0 No FDMI support
5603 # 1 Traditional FDMI support (default)
5604 # Traditional FDMI support means the driver will assume FDMI-2 support;
# however, if that fails, it will fall back to FDMI-1.
5606 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value of
5608 # lpfc_fdmi_on.
5609 # Value range [0,1]. Default value is 1.
5610 */
5611 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5612
5613 /*
5614 # Specifies the maximum number of ELS cmds we can have outstanding (for
5615 # discovery). Value range is [1,64]. Default value = 32.
5616 */
5617 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5618 "during discovery");
5619
5620 /*
5621 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5622 # will be scanned by the SCSI midlayer when sequential scanning is
5623 # used; and is also the highest LUN ID allowed when the SCSI midlayer
5624 # parses REPORT_LUN responses. The lpfc driver has no LUN count or
5625 # LUN ID limit, but the SCSI midlayer requires this field for the uses
5626 # above. The lpfc driver limits the default value to 255 for two reasons.
5627 # As it bounds the sequential scan loop, scanning for thousands of luns
5628 # on a target can take minutes of wall clock time. Additionally,
5629 # there are FC targets, such as JBODs, that only recognize 8-bits of
5630 # LUN ID. When they receive a value greater than 8 bits, they chop off
5631 # the high order bits. In other words, they see LUN IDs 0, 256, 512,
5632 # and so on all as LUN ID 0. This causes the linux kernel, which sees
5633 # valid responses at each of the LUN IDs, to believe there are multiple
5634 # devices present, when in fact, there is only 1.
5635 # A customer that is aware of their target behaviors, and the results as
5636 # indicated above, is welcome to increase the lpfc_max_luns value.
5637 # As mentioned, this value is not used by the lpfc driver, only the
5638 # SCSI midlayer.
5639 # Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
5641 */
5642 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5643
5644 /*
# lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
5646 # Value range is [1,255], default value is 10.
5647 */
5648 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5649 "Milliseconds driver will wait between polling FCP ring");
5650
5651 /*
5652 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5653 # to complete in seconds. Value range is [5,180], default value is 60.
5654 */
5655 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5656 "Maximum time to wait for task management commands to complete");
5657 /*
5658 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5659 # support this feature
5660 # 0 = MSI disabled
5661 # 1 = MSI enabled
5662 # 2 = MSI-X enabled (default)
5663 # Value range is [0,2]. Default value is 2.
5664 */
5665 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5666 "MSI-X (2), if possible");
5667
5668 /*
5669 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5670 *
5671 * 0 = NVME OAS disabled
5672 * 1 = NVME OAS enabled
5673 *
5674 * Value range is [0,1]. Default value is 0.
5675 */
5676 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5677 "Use OAS bit on NVME IOs");
5678
5679 /*
 * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending NVME/NVMET IOs
5681 *
5682 * 0 = Put NVME Command in SGL
5683 * 1 = Embed NVME Command in WQE (unless G7)
5684 * 2 = Embed NVME Command in WQE (force)
5685 *
5686 * Value range is [0,2]. Default value is 1.
5687 */
5688 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5689 "Embed NVME Command in WQE");
5690
5691 /*
5692 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5693 * the driver will advertise it supports to the SCSI layer.
5694 *
5695 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
 * [1,256] = Manually specify the nr_hw_queues value to be advertised.
5697 *
5698 * Value range is [0,256]. Default value is 8.
5699 */
5700 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5701 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5702 "Set the number of SCSI Queues advertised");
5703
5704 /*
5705 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5706 * will advertise it supports to the NVME and SCSI layers. This also
5707 * will map to the number of CQ/WQ pairs the driver will create.
5708 *
5709 * The NVME Layer will try to create this many, plus 1 administrative
 * hardware queue. The administrative queue will always map to WQ 0.
 * A hardware IO queue (qidx) maps to a specific driver CQ/WQ pair.
5712 *
5713 * 0 = Configure the number of hdw queues to the number of active CPUs.
5714 * 1,256 = Manually specify how many hdw queues to use.
5715 *
5716 * Value range is [0,256]. Default value is 0.
5717 */
5718 LPFC_ATTR_R(hdw_queue,
5719 LPFC_HBA_HDWQ_DEF,
5720 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5721 "Set the number of I/O Hardware Queues");
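
/*
 * Illustrative usage (a sketch; the queue count is an example only):
 * lpfc_hdw_queue is read-only at run time, so it is set at module load,
 * e.g.
 *   modprobe lpfc lpfc_hdw_queue=8
 */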
5722
5723 #if IS_ENABLED(CONFIG_X86)
5724 /**
 * lpfc_cpumask_irq_mode_init - initializes the cpumask of phba based on
 * irq_chann_mode
5727 * @phba: Pointer to HBA context object.
5728 **/
5729 static void
lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5731 {
5732 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5733 const struct cpumask *sibling_mask;
5734 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5735
5736 cpumask_clear(aff_mask);
5737
5738 if (phba->irq_chann_mode == NUMA_MODE) {
5739 /* Check if we're a NUMA architecture */
5740 numa_node = dev_to_node(&phba->pcidev->dev);
5741 if (numa_node == NUMA_NO_NODE) {
5742 phba->irq_chann_mode = NORMAL_MODE;
5743 return;
5744 }
5745 }
5746
5747 for_each_possible_cpu(cpu) {
5748 switch (phba->irq_chann_mode) {
5749 case NUMA_MODE:
5750 if (cpu_to_node(cpu) == numa_node)
5751 cpumask_set_cpu(cpu, aff_mask);
5752 break;
5753 case NHT_MODE:
5754 sibling_mask = topology_sibling_cpumask(cpu);
5755 first_cpu = cpumask_first(sibling_mask);
5756 if (first_cpu < nr_cpu_ids)
5757 cpumask_set_cpu(first_cpu, aff_mask);
5758 break;
5759 default:
5760 break;
5761 }
5762 }
5763 }
5764 #endif
5765
5766 static void
lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5768 {
5769 #if IS_ENABLED(CONFIG_X86)
5770 switch (boot_cpu_data.x86_vendor) {
5771 case X86_VENDOR_AMD:
5772 /* If AMD architecture, then default is NUMA_MODE */
5773 phba->irq_chann_mode = NUMA_MODE;
5774 break;
5775 case X86_VENDOR_INTEL:
5776 /* If Intel architecture, then default is no hyperthread mode */
5777 phba->irq_chann_mode = NHT_MODE;
5778 break;
5779 default:
5780 phba->irq_chann_mode = NORMAL_MODE;
5781 break;
5782 }
5783 lpfc_cpumask_irq_mode_init(phba);
5784 #else
5785 phba->irq_chann_mode = NORMAL_MODE;
5786 #endif
5787 }
5788
5789 /*
5790 * lpfc_irq_chann: Set the number of IRQ vectors that are available
5791 * for Hardware Queues to utilize. This also will map to the number
5792 * of EQ / MSI-X vectors the driver will create. This should never be
5793 * more than the number of Hardware Queues
5794 *
5795 * 0 = Configure number of IRQ Channels to:
5796 * if AMD architecture, number of CPUs on HBA's NUMA node
5797 * if Intel architecture, number of physical CPUs.
5798 * otherwise, number of active CPUs.
5799 * [1,256] = Manually specify how many IRQ Channels to use.
5800 *
5801 * Value range is [0,256]. Default value is [0].
5802 */
5803 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5804 module_param(lpfc_irq_chann, uint, 0444);
5805 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
5806
/**
 * lpfc_irq_chann_init - Set the hba irq_chann initial value
5808 * @phba: lpfc_hba pointer.
5809 * @val: contains the initial value
5810 *
5811 * Description:
5812 * Validates the initial value is within range and assigns it to the
5813 * adapter. If not in range, an error message is posted and the
5814 * default value is assigned.
5815 *
5816 * Returns:
5817 * zero if value is in range and is set
5818 * -EINVAL if value was out of range
5819 **/
5820 static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5822 {
5823 const struct cpumask *aff_mask;
5824
5825 if (phba->cfg_use_msi != 2) {
5826 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5827 "8532 use_msi = %u ignoring cfg_irq_numa\n",
5828 phba->cfg_use_msi);
5829 phba->irq_chann_mode = NORMAL_MODE;
5830 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5831 return 0;
5832 }
5833
5834 /* Check if default setting was passed */
5835 if (val == LPFC_IRQ_CHANN_DEF)
5836 lpfc_assign_default_irq_chann(phba);
5837
5838 if (phba->irq_chann_mode != NORMAL_MODE) {
5839 aff_mask = &phba->sli4_hba.irq_aff_mask;
5840
5841 if (cpumask_empty(aff_mask)) {
5842 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5843 "8533 Could not identify CPUS for "
5844 "mode %d, ignoring\n",
5845 phba->irq_chann_mode);
5846 phba->irq_chann_mode = NORMAL_MODE;
5847 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5848 } else {
5849 phba->cfg_irq_chann = cpumask_weight(aff_mask);
5850
5851 /* If no hyperthread mode, then set hdwq count to
5852 * aff_mask weight as well
5853 */
5854 if (phba->irq_chann_mode == NHT_MODE)
5855 phba->cfg_hdw_queue = phba->cfg_irq_chann;
5856
5857 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5858 "8543 lpfc_irq_chann set to %u "
5859 "(mode: %d)\n", phba->cfg_irq_chann,
5860 phba->irq_chann_mode);
5861 }
5862 } else {
5863 if (val > LPFC_IRQ_CHANN_MAX) {
5864 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5865 "8545 lpfc_irq_chann attribute cannot "
5866 "be set to %u, allowed range is "
5867 "[%u,%u]\n",
5868 val,
5869 LPFC_IRQ_CHANN_MIN,
5870 LPFC_IRQ_CHANN_MAX);
5871 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5872 return -EINVAL;
5873 }
5874 phba->cfg_irq_chann = val;
5875 }
5876
5877 return 0;
5878 }
5879
5880 /**
5881 * lpfc_irq_chann_show - Display value of irq_chann
5882 * @dev: class converted to a Scsi_host structure.
5883 * @attr: device attribute, not used.
 * @buf: on return contains the number of IRQ vectors (channels) in use
5885 *
5886 * Returns: size of formatted string.
5887 **/
5888 static ssize_t
lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5890 char *buf)
5891 {
5892 struct Scsi_Host *shost = class_to_shost(dev);
5893 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5894 struct lpfc_hba *phba = vport->phba;
5895
5896 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
5897 }
5898
5899 static DEVICE_ATTR_RO(lpfc_irq_chann);
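
/*
 * Illustrative usage (a sketch; the vector count and host number are
 * examples only): lpfc_irq_chann is a load-time parameter and its sysfs
 * attribute is read-only.
 *   modprobe lpfc lpfc_irq_chann=8
 *   cat /sys/class/scsi_host/host0/lpfc_irq_chann
 */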
5900
5901 /*
5902 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5903 # 0 = HBA resets disabled
5904 # 1 = HBA resets enabled (default)
5905 # 2 = HBA reset via PCI bus reset enabled
5906 # Value range is [0,2]. Default value is 1.
5907 */
5908 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5909
5910 /*
# lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
# 0 = HBA Heartbeat disabled (default)
# 1 = HBA Heartbeat enabled
# Value range is [0,1]. Default value is 0.
5915 */
5916 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5917
5918 /*
5919 # lpfc_EnableXLane: Enable Express Lane Feature
5920 # 0x0 Express Lane Feature disabled
5921 # 0x1 Express Lane Feature enabled
5922 # Value range is [0,1]. Default value is 0.
5923 */
5924 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5925
5926 /*
5927 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
5928 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
5929 # Value range is [0x0,0x7f]. Default value is 0
5930 */
5931 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5932
5933 /*
5934 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
5935 # 0 = BlockGuard disabled (default)
5936 # 1 = BlockGuard enabled
5937 # Value range is [0,1]. Default value is 0.
5938 */
5939 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5940
5941 /*
5942 # lpfc_prot_mask: i
5943 # - Bit mask of host protection capabilities used to register with the
5944 # SCSI mid-layer
5945 # - Only meaningful if BG is turned on (lpfc_enable_bg=1).
5946 # - Allows you to ultimately specify which profiles to use
5947 # - Default will result in registering capabilities for all profiles.
5948 # - SHOST_DIF_TYPE1_PROTECTION 1
5949 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
5950 # - SHOST_DIX_TYPE0_PROTECTION 8
5951 # HBA supports DIX Type 0: Host to HBA protection only
5952 # - SHOST_DIX_TYPE1_PROTECTION 16
5953 # HBA supports DIX Type 1: Host to HBA Type 1 protection
5954 #
5955 */
5956 LPFC_ATTR(prot_mask,
5957 (SHOST_DIF_TYPE1_PROTECTION |
5958 SHOST_DIX_TYPE0_PROTECTION |
5959 SHOST_DIX_TYPE1_PROTECTION),
5960 0,
5961 (SHOST_DIF_TYPE1_PROTECTION |
5962 SHOST_DIX_TYPE0_PROTECTION |
5963 SHOST_DIX_TYPE1_PROTECTION),
5964 "T10-DIF host protection capabilities mask");
5965
5966 /*
5967 # lpfc_prot_guard: i
5968 # - Bit mask of protection guard types to register with the SCSI mid-layer
5969 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
5970 # - Allows you to ultimately specify which profiles to use
5971 # - Default will result in registering capabilities for all guard types
5972 #
5973 */
5974 LPFC_ATTR(prot_guard,
5975 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5976 "T10-DIF host protection guard type");
5977
5978 /*
5979 * Delay initial NPort discovery when Clean Address bit is cleared in
5980 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
5981 * This parameter can have value 0 or 1.
5982 * When this parameter is set to 0, no delay is added to the initial
5983 * discovery.
5984 * When this parameter is set to non-zero value, initial Nport discovery is
5985 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
5986 * accept and FCID/Fabric name/Fabric portname is changed.
 * The driver always delays Nport discovery for subsequent FLOGI/FDISC completion
5988 * when Clean Address bit is cleared in FLOGI/FDISC
5989 * accept and FCID/Fabric name/Fabric portname is changed.
5990 * Default value is 0.
5991 */
5992 LPFC_ATTR(delay_discovery, 0, 0, 1,
5993 "Delay NPort discovery when Clean Address bit is cleared.");
5994
5995 /*
5996 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
5997 * This value can be set to values between 64 and 4096. The default value
5998 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
5999 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
6000 * Because of the additional overhead involved in setting up T10-DIF,
6001 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
6002 * and will be limited to 512 if BlockGuard is enabled under SLI3.
6003 */
6004 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6005 module_param(lpfc_sg_seg_cnt, uint, 0444);
6006 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
6007
6008 /**
6009 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
6010 * configured for the adapter
6011 * @dev: class converted to a Scsi_host structure.
6012 * @attr: device attribute, not used.
6013 * @buf: on return contains a string with the list sizes
6014 *
6015 * Returns: size of formatted string.
6016 **/
6017 static ssize_t
lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6019 char *buf)
6020 {
6021 struct Scsi_Host *shost = class_to_shost(dev);
6022 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6023 struct lpfc_hba *phba = vport->phba;
6024 int len;
6025
6026 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
6027 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6028
6029 len += scnprintf(buf + len, PAGE_SIZE - len,
6030 "Cfg: %d SCSI: %d NVME: %d\n",
6031 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6032 phba->cfg_nvme_seg_cnt);
6033 return len;
6034 }
6035
6036 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
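
/*
 * Illustrative usage (a sketch; the segment count and host number are
 * examples only): lpfc_sg_seg_cnt is set at module load; the read-only
 * sysfs attribute reports the resulting SGL sizing.
 *   modprobe lpfc lpfc_sg_seg_cnt=256
 *   cat /sys/class/scsi_host/host0/lpfc_sg_seg_cnt
 */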
6037
6038 /**
6039 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6040 * @phba: lpfc_hba pointer.
6041 * @val: contains the initial value
6042 *
6043 * Description:
6044 * Validates the initial value is within range and assigns it to the
6045 * adapter. If not in range, an error message is posted and the
6046 * default value is assigned.
6047 *
6048 * Returns:
6049 * zero if value is in range and is set
6050 * -EINVAL if value was out of range
6051 **/
6052 static int
lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6054 {
6055 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6056 phba->cfg_sg_seg_cnt = val;
6057 return 0;
6058 }
6059 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6060 "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
6061 "be set to %d, allowed range is [%d, %d]\n",
6062 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6063 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6064 return -EINVAL;
6065 }
6066
6067 /*
6068 * lpfc_enable_mds_diags: Enable MDS Diagnostics
6069 * 0 = MDS Diagnostics disabled (default)
6070 * 1 = MDS Diagnostics enabled
6071 * Value range is [0,1]. Default value is 0.
6072 */
6073 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6074
6075 /*
6076 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6077 * 0 = Disable firmware logging (default)
 * [1-4] = Multiple of 1/4th MB of host memory for FW logging
6079 * Value range [0..4]. Default value is 0
6080 */
6081 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6082 lpfc_param_show(ras_fwlog_buffsize);
6083
6084 static ssize_t
lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6086 {
6087 int ret = 0;
6088 enum ras_state state;
6089
6090 if (!lpfc_rangecheck(val, 0, 4))
6091 return -EINVAL;
6092
6093 if (phba->cfg_ras_fwlog_buffsize == val)
6094 return 0;
6095
6096 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6097 return -EINVAL;
6098
6099 spin_lock_irq(&phba->hbalock);
6100 state = phba->ras_fwlog.state;
6101 spin_unlock_irq(&phba->hbalock);
6102
6103 if (state == REG_INPROGRESS) {
6104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6105 "registration is in progress\n");
6106 return -EBUSY;
6107 }
6108
6109 /* For disable logging: stop the logs and free the DMA.
6110 * For ras_fwlog_buffsize size change we still need to free and
6111 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6112 */
6113 phba->cfg_ras_fwlog_buffsize = val;
6114 if (state == ACTIVE) {
6115 lpfc_ras_stop_fwlog(phba);
6116 lpfc_sli4_ras_dma_free(phba);
6117 }
6118
6119 lpfc_sli4_ras_init(phba);
6120 if (phba->ras_fwlog.ras_enabled)
6121 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6122 LPFC_RAS_ENABLE_LOGGING);
6123 return ret;
6124 }
6125
6126 lpfc_param_store(ras_fwlog_buffsize);
6127 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
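
/*
 * Illustrative usage (hypothetical host number): values 1-4 allocate
 * that many quarter-megabyte host buffers for firmware logging, and 0
 * disables logging and frees the buffer; -EBUSY is returned while
 * registration is in progress.
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_ras_fwlog_buffsize
 */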
6128
6129 /*
6130 * lpfc_ras_fwlog_level: Firmware logging verbosity level
6131 * Valid only if firmware logging is enabled
 * 0 (least verbosity) to 4 (most verbosity)
6133 * Value range is [0..4]. Default value is 0
6134 */
6135 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6136
6137 /*
6138 * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6139 * Default function which has RAS support : 0
6140 * Value Range is [0..7].
6141 * FW logging is a global action and enablement is via a specific
6142 * port.
6143 */
6144 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6145
6146 /*
6147 * lpfc_enable_bbcr: Enable BB Credit Recovery
6148 * 0 = BB Credit Recovery disabled
6149 * 1 = BB Credit Recovery enabled (default)
6150 * Value range is [0,1]. Default value is 1.
6151 */
6152 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6153
6154 /*
6155 * lpfc_enable_dpp: Enable DPP on G7
6156 * 0 = DPP on G7 disabled
6157 * 1 = DPP on G7 enabled (default)
6158 * Value range is [0,1]. Default value is 1.
6159 */
6160 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6161
6162 struct device_attribute *lpfc_hba_attrs[] = {
6163 &dev_attr_nvme_info,
6164 &dev_attr_scsi_stat,
6165 &dev_attr_bg_info,
6166 &dev_attr_bg_guard_err,
6167 &dev_attr_bg_apptag_err,
6168 &dev_attr_bg_reftag_err,
6169 &dev_attr_info,
6170 &dev_attr_serialnum,
6171 &dev_attr_modeldesc,
6172 &dev_attr_modelname,
6173 &dev_attr_programtype,
6174 &dev_attr_portnum,
6175 &dev_attr_fwrev,
6176 &dev_attr_hdw,
6177 &dev_attr_option_rom_version,
6178 &dev_attr_link_state,
6179 &dev_attr_num_discovered_ports,
6180 &dev_attr_menlo_mgmt_mode,
6181 &dev_attr_lpfc_drvr_version,
6182 &dev_attr_lpfc_enable_fip,
6183 &dev_attr_lpfc_temp_sensor,
6184 &dev_attr_lpfc_log_verbose,
6185 &dev_attr_lpfc_lun_queue_depth,
6186 &dev_attr_lpfc_tgt_queue_depth,
6187 &dev_attr_lpfc_hba_queue_depth,
6188 &dev_attr_lpfc_peer_port_login,
6189 &dev_attr_lpfc_nodev_tmo,
6190 &dev_attr_lpfc_devloss_tmo,
6191 &dev_attr_lpfc_enable_fc4_type,
6192 &dev_attr_lpfc_fcp_class,
6193 &dev_attr_lpfc_use_adisc,
6194 &dev_attr_lpfc_first_burst_size,
6195 &dev_attr_lpfc_ack0,
6196 &dev_attr_lpfc_xri_rebalancing,
6197 &dev_attr_lpfc_topology,
6198 &dev_attr_lpfc_scan_down,
6199 &dev_attr_lpfc_link_speed,
6200 &dev_attr_lpfc_fcp_io_sched,
6201 &dev_attr_lpfc_ns_query,
6202 &dev_attr_lpfc_fcp2_no_tgt_reset,
6203 &dev_attr_lpfc_cr_delay,
6204 &dev_attr_lpfc_cr_count,
6205 &dev_attr_lpfc_multi_ring_support,
6206 &dev_attr_lpfc_multi_ring_rctl,
6207 &dev_attr_lpfc_multi_ring_type,
6208 &dev_attr_lpfc_fdmi_on,
6209 &dev_attr_lpfc_enable_SmartSAN,
6210 &dev_attr_lpfc_max_luns,
6211 &dev_attr_lpfc_enable_npiv,
6212 &dev_attr_lpfc_fcf_failover_policy,
6213 &dev_attr_lpfc_enable_rrq,
6214 &dev_attr_nport_evt_cnt,
6215 &dev_attr_board_mode,
6216 &dev_attr_max_vpi,
6217 &dev_attr_used_vpi,
6218 &dev_attr_max_rpi,
6219 &dev_attr_used_rpi,
6220 &dev_attr_max_xri,
6221 &dev_attr_used_xri,
6222 &dev_attr_npiv_info,
6223 &dev_attr_issue_reset,
6224 &dev_attr_lpfc_poll,
6225 &dev_attr_lpfc_poll_tmo,
6226 &dev_attr_lpfc_task_mgmt_tmo,
6227 &dev_attr_lpfc_use_msi,
6228 &dev_attr_lpfc_nvme_oas,
6229 &dev_attr_lpfc_nvme_embed_cmd,
6230 &dev_attr_lpfc_fcp_imax,
6231 &dev_attr_lpfc_force_rscn,
6232 &dev_attr_lpfc_cq_poll_threshold,
6233 &dev_attr_lpfc_cq_max_proc_limit,
6234 &dev_attr_lpfc_fcp_cpu_map,
6235 &dev_attr_lpfc_fcp_mq_threshold,
6236 &dev_attr_lpfc_hdw_queue,
6237 &dev_attr_lpfc_irq_chann,
6238 &dev_attr_lpfc_suppress_rsp,
6239 &dev_attr_lpfc_nvmet_mrq,
6240 &dev_attr_lpfc_nvmet_mrq_post,
6241 &dev_attr_lpfc_nvme_enable_fb,
6242 &dev_attr_lpfc_nvmet_fb_size,
6243 &dev_attr_lpfc_enable_bg,
6244 &dev_attr_lpfc_soft_wwnn,
6245 &dev_attr_lpfc_soft_wwpn,
6246 &dev_attr_lpfc_soft_wwn_enable,
6247 &dev_attr_lpfc_enable_hba_reset,
6248 &dev_attr_lpfc_enable_hba_heartbeat,
6249 &dev_attr_lpfc_EnableXLane,
6250 &dev_attr_lpfc_XLanePriority,
6251 &dev_attr_lpfc_xlane_lun,
6252 &dev_attr_lpfc_xlane_tgt,
6253 &dev_attr_lpfc_xlane_vpt,
6254 &dev_attr_lpfc_xlane_lun_state,
6255 &dev_attr_lpfc_xlane_lun_status,
6256 &dev_attr_lpfc_xlane_priority,
6257 &dev_attr_lpfc_sg_seg_cnt,
6258 &dev_attr_lpfc_max_scsicmpl_time,
6259 &dev_attr_lpfc_stat_data_ctrl,
6260 &dev_attr_lpfc_aer_support,
6261 &dev_attr_lpfc_aer_state_cleanup,
6262 &dev_attr_lpfc_sriov_nr_virtfn,
6263 &dev_attr_lpfc_req_fw_upgrade,
6264 &dev_attr_lpfc_suppress_link_up,
6265 &dev_attr_iocb_hw,
6266 &dev_attr_pls,
6267 &dev_attr_pt,
6268 &dev_attr_txq_hw,
6269 &dev_attr_txcmplq_hw,
6270 &dev_attr_lpfc_sriov_hw_max_virtfn,
6271 &dev_attr_protocol,
6272 &dev_attr_lpfc_xlane_supported,
6273 &dev_attr_lpfc_enable_mds_diags,
6274 &dev_attr_lpfc_ras_fwlog_buffsize,
6275 &dev_attr_lpfc_ras_fwlog_level,
6276 &dev_attr_lpfc_ras_fwlog_func,
6277 &dev_attr_lpfc_enable_bbcr,
6278 &dev_attr_lpfc_enable_dpp,
6279 NULL,
6280 };
6281
6282 struct device_attribute *lpfc_vport_attrs[] = {
6283 &dev_attr_info,
6284 &dev_attr_link_state,
6285 &dev_attr_num_discovered_ports,
6286 &dev_attr_lpfc_drvr_version,
6287 &dev_attr_lpfc_log_verbose,
6288 &dev_attr_lpfc_lun_queue_depth,
6289 &dev_attr_lpfc_tgt_queue_depth,
6290 &dev_attr_lpfc_nodev_tmo,
6291 &dev_attr_lpfc_devloss_tmo,
6292 &dev_attr_lpfc_hba_queue_depth,
6293 &dev_attr_lpfc_peer_port_login,
6294 &dev_attr_lpfc_restrict_login,
6295 &dev_attr_lpfc_fcp_class,
6296 &dev_attr_lpfc_use_adisc,
6297 &dev_attr_lpfc_first_burst_size,
6298 &dev_attr_lpfc_max_luns,
6299 &dev_attr_nport_evt_cnt,
6300 &dev_attr_npiv_info,
6301 &dev_attr_lpfc_enable_da_id,
6302 &dev_attr_lpfc_max_scsicmpl_time,
6303 &dev_attr_lpfc_stat_data_ctrl,
6304 &dev_attr_lpfc_static_vport,
6305 NULL,
6306 };
6307
6308 /**
6309 * sysfs_ctlreg_write - Write method for writing to ctlreg
6310 * @filp: open sysfs file
6311 * @kobj: kernel kobject that contains the kernel class device.
6312 * @bin_attr: kernel attributes passed to us.
6313 * @buf: contains the data to be written to the adapter IOREG space.
6314 * @off: offset into buffer to beginning of data.
6315 * @count: bytes to transfer.
6316 *
6317 * Description:
6318 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6319 * Uses the adapter io control registers to send buf contents to the adapter.
6320 *
6321 * Returns:
6322 * -ERANGE off and count combo out of range
6323 * -EINVAL off, count or buff address invalid
6324 * -EPERM adapter is offline
6325 * value of count, buf contents written
6326 **/
6327 static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6329 struct bin_attribute *bin_attr,
6330 char *buf, loff_t off, size_t count)
6331 {
6332 size_t buf_off;
6333 struct device *dev = container_of(kobj, struct device, kobj);
6334 struct Scsi_Host *shost = class_to_shost(dev);
6335 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6336 struct lpfc_hba *phba = vport->phba;
6337
6338 if (phba->sli_rev >= LPFC_SLI_REV4)
6339 return -EPERM;
6340
6341 if ((off + count) > FF_REG_AREA_SIZE)
6342 return -ERANGE;
6343
6344 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6345 return 0;
6346
6347 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6348 return -EINVAL;
6349
6350 /* This is to protect HBA registers from accidental writes. */
6351 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6352 return -EINVAL;
6353
6354 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6355 return -EPERM;
6356
6357 spin_lock_irq(&phba->hbalock);
6358 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6359 buf_off += sizeof(uint32_t))
6360 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6361 phba->ctrl_regs_memmap_p + off + buf_off);
6362
6363 spin_unlock_irq(&phba->hbalock);
6364
6365 return count;
6366 }
6367
6368 /**
6369 * sysfs_ctlreg_read - Read method for reading from ctlreg
6370 * @filp: open sysfs file
6371 * @kobj: kernel kobject that contains the kernel class device.
6372 * @bin_attr: kernel attributes passed to us.
6373 * @buf: if successful contains the data from the adapter IOREG space.
6374 * @off: offset into buffer to beginning of data.
6375 * @count: bytes to transfer.
6376 *
6377 * Description:
6378 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6379 * Uses the adapter io control registers to read data into buf.
6380 *
6381 * Returns:
6382 * -ERANGE off and count combo out of range
6383 * -EINVAL off, count or buff address invalid
6384 * value of count, buf contents read
6385 **/
6386 static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6388 struct bin_attribute *bin_attr,
6389 char *buf, loff_t off, size_t count)
6390 {
6391 size_t buf_off;
6392 uint32_t * tmp_ptr;
6393 struct device *dev = container_of(kobj, struct device, kobj);
6394 struct Scsi_Host *shost = class_to_shost(dev);
6395 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6396 struct lpfc_hba *phba = vport->phba;
6397
6398 if (phba->sli_rev >= LPFC_SLI_REV4)
6399 return -EPERM;
6400
6401 if (off > FF_REG_AREA_SIZE)
6402 return -ERANGE;
6403
6404 if ((off + count) > FF_REG_AREA_SIZE)
6405 count = FF_REG_AREA_SIZE - off;
6406
6407 if (count == 0) return 0;
6408
6409 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6410 return -EINVAL;
6411
6412 spin_lock_irq(&phba->hbalock);
6413
6414 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6415 tmp_ptr = (uint32_t *)(buf + buf_off);
6416 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6417 }
6418
6419 spin_unlock_irq(&phba->hbalock);
6420
6421 return count;
6422 }
6423
6424 static struct bin_attribute sysfs_ctlreg_attr = {
6425 .attr = {
6426 .name = "ctlreg",
6427 .mode = S_IRUSR | S_IWUSR,
6428 },
6429 .size = 256,
6430 .read = sysfs_ctlreg_read,
6431 .write = sysfs_ctlreg_write,
6432 };
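
/*
 * Illustrative read of the ctlreg binary attribute (hypothetical host
 * number; not available on SLI-4 adapters, and offsets/lengths must be
 * 4-byte multiples):
 *   dd if=/sys/class/scsi_host/host0/ctlreg bs=4 count=1 2>/dev/null | xxd
 * Writes additionally require the adapter to be offline and the data to
 * be prefixed with the LPFC_REG_WRITE_KEY ("EMLX") bytes.
 */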
6433
6434 /**
6435 * sysfs_mbox_write - Write method for writing information via mbox
6436 * @filp: open sysfs file
6437 * @kobj: kernel kobject that contains the kernel class device.
6438 * @bin_attr: kernel attributes passed to us.
6439 * @buf: contains the data to be written to sysfs mbox.
6440 * @off: offset into buffer to beginning of data.
6441 * @count: bytes to transfer.
6442 *
6443 * Description:
6444 * Deprecated function. All mailbox access from user space is performed via the
6445 * bsg interface.
6446 *
6447 * Returns:
6448 * -EPERM operation not permitted
6449 **/
6450 static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6452 struct bin_attribute *bin_attr,
6453 char *buf, loff_t off, size_t count)
6454 {
6455 return -EPERM;
6456 }
6457
6458 /**
6459 * sysfs_mbox_read - Read method for reading information via mbox
6460 * @filp: open sysfs file
6461 * @kobj: kernel kobject that contains the kernel class device.
6462 * @bin_attr: kernel attributes passed to us.
6463 * @buf: contains the data to be read from sysfs mbox.
6464 * @off: offset into buffer to beginning of data.
6465 * @count: bytes to transfer.
6466 *
6467 * Description:
6468 * Deprecated function. All mailbox access from user space is performed via the
6469 * bsg interface.
6470 *
6471 * Returns:
6472 * -EPERM operation not permitted
6473 **/
6474 static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6476 struct bin_attribute *bin_attr,
6477 char *buf, loff_t off, size_t count)
6478 {
6479 return -EPERM;
6480 }
6481
6482 static struct bin_attribute sysfs_mbox_attr = {
6483 .attr = {
6484 .name = "mbox",
6485 .mode = S_IRUSR | S_IWUSR,
6486 },
6487 .size = MAILBOX_SYSFS_MAX,
6488 .read = sysfs_mbox_read,
6489 .write = sysfs_mbox_write,
6490 };
6491
6492 /**
6493 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6494 * @vport: address of lpfc vport structure.
6495 *
6496 * Return codes:
6497 * zero on success
6498 * error return code from sysfs_create_bin_file()
6499 **/
6500 int
lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6502 {
6503 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6504 int error;
6505
6506 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6507 &sysfs_drvr_stat_data_attr);
6508
6509 /* Virtual ports do not need ctrl_reg and mbox */
6510 if (error || vport->port_type == LPFC_NPIV_PORT)
6511 goto out;
6512
6513 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6514 &sysfs_ctlreg_attr);
6515 if (error)
6516 goto out_remove_stat_attr;
6517
6518 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6519 &sysfs_mbox_attr);
6520 if (error)
6521 goto out_remove_ctlreg_attr;
6522
6523 return 0;
6524 out_remove_ctlreg_attr:
6525 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6526 out_remove_stat_attr:
6527 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6528 &sysfs_drvr_stat_data_attr);
6529 out:
6530 return error;
6531 }
6532
6533 /**
6534 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6535 * @vport: address of lpfc vport structure.
6536 **/
6537 void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6539 {
6540 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6541 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6542 &sysfs_drvr_stat_data_attr);
6543 /* Virtual ports do not need ctrl_reg and mbox */
6544 if (vport->port_type == LPFC_NPIV_PORT)
6545 return;
6546 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6547 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6548 }
6549
6550 /*
6551 * Dynamic FC Host Attributes Support
6552 */
6553
6554 /**
6555 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6556 * @shost: kernel scsi host pointer.
6557 **/
6558 static void
lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6560 {
6561 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6562
6563 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6564 sizeof fc_host_symbolic_name(shost));
6565 }
6566
6567 /**
6568 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6569 * @shost: kernel scsi host pointer.
6570 **/
6571 static void
6572 lpfc_get_host_port_id(struct Scsi_Host *shost)
6573 {
6574 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6575
6576 /* note: fc_myDID already in cpu endianness */
6577 fc_host_port_id(shost) = vport->fc_myDID;
6578 }
6579
6580 /**
6581 * lpfc_get_host_port_type - Set the value of the scsi host port type
6582 * @shost: kernel scsi host pointer.
6583 **/
6584 static void
6585 lpfc_get_host_port_type(struct Scsi_Host *shost)
6586 {
6587 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6588 struct lpfc_hba *phba = vport->phba;
6589
6590 spin_lock_irq(shost->host_lock);
6591
6592 if (vport->port_type == LPFC_NPIV_PORT) {
6593 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6594 } else if (lpfc_is_link_up(phba)) {
6595 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6596 if (vport->fc_flag & FC_PUBLIC_LOOP)
6597 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6598 else
6599 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6600 } else {
6601 if (vport->fc_flag & FC_FABRIC)
6602 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6603 else
6604 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6605 }
6606 } else
6607 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6608
6609 spin_unlock_irq(shost->host_lock);
6610 }
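
/*
 * Editorial summary of the mapping above: NPIV vports always report
 * FC_PORTTYPE_NPIV; with the link up, loop topology maps to NLPORT
 * (public loop) or LPORT (private loop), and non-loop maps to NPORT
 * (fabric attached) or PTP (direct point-to-point); otherwise the port
 * type is reported as FC_PORTTYPE_UNKNOWN.
 */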
6611
6612 /**
6613 * lpfc_get_host_port_state - Set the value of the scsi host port state
6614 * @shost: kernel scsi host pointer.
6615 **/
6616 static void
6617 lpfc_get_host_port_state(struct Scsi_Host *shost)
6618 {
6619 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6620 struct lpfc_hba *phba = vport->phba;
6621
6622 spin_lock_irq(shost->host_lock);
6623
6624 if (vport->fc_flag & FC_OFFLINE_MODE)
6625 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6626 else {
6627 switch (phba->link_state) {
6628 case LPFC_LINK_UNKNOWN:
6629 case LPFC_LINK_DOWN:
6630 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6631 break;
6632 case LPFC_LINK_UP:
6633 case LPFC_CLEAR_LA:
6634 case LPFC_HBA_READY:
6635 			/* Link is up; report port state accordingly */
6636 if (vport->port_state < LPFC_VPORT_READY)
6637 fc_host_port_state(shost) =
6638 FC_PORTSTATE_BYPASSED;
6639 else
6640 fc_host_port_state(shost) =
6641 FC_PORTSTATE_ONLINE;
6642 break;
6643 case LPFC_HBA_ERROR:
6644 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6645 break;
6646 default:
6647 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6648 break;
6649 }
6650 }
6651
6652 spin_unlock_irq(shost->host_lock);
6653 }
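
/*
 * Editorial note: FC_PORTSTATE_BYPASSED is used above as an interim
 * state -- the physical link is up but this vport has not yet reached
 * LPFC_VPORT_READY (for example, discovery is still in progress), so it
 * is not reported as ONLINE yet.
 */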
6654
6655 /**
6656 * lpfc_get_host_speed - Set the value of the scsi host speed
6657 * @shost: kernel scsi host pointer.
6658 **/
6659 static void
6660 lpfc_get_host_speed(struct Scsi_Host *shost)
6661 {
6662 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6663 struct lpfc_hba *phba = vport->phba;
6664
6665 spin_lock_irq(shost->host_lock);
6666
6667 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6668 		switch (phba->fc_linkspeed) {
6669 case LPFC_LINK_SPEED_1GHZ:
6670 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6671 break;
6672 case LPFC_LINK_SPEED_2GHZ:
6673 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6674 break;
6675 case LPFC_LINK_SPEED_4GHZ:
6676 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6677 break;
6678 case LPFC_LINK_SPEED_8GHZ:
6679 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6680 break;
6681 case LPFC_LINK_SPEED_10GHZ:
6682 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6683 break;
6684 case LPFC_LINK_SPEED_16GHZ:
6685 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6686 break;
6687 case LPFC_LINK_SPEED_32GHZ:
6688 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6689 break;
6690 case LPFC_LINK_SPEED_64GHZ:
6691 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6692 break;
6693 case LPFC_LINK_SPEED_128GHZ:
6694 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6695 break;
6696 default:
6697 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6698 break;
6699 }
6700 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6701 switch (phba->fc_linkspeed) {
6702 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6703 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6704 break;
6705 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6706 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6707 break;
6708 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6709 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6710 break;
6711 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6712 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6713 break;
6714 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6715 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6716 break;
6717 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6718 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6719 break;
6720 default:
6721 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6722 break;
6723 }
6724 } else
6725 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6726
6727 spin_unlock_irq(shost->host_lock);
6728 }
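
/*
 * Editorial note: native FC and FCoE links report speed with different
 * driver encodings (LPFC_LINK_SPEED_* vs. LPFC_ASYNC_LINK_SPEED_*),
 * which is why two separate switch statements are needed before mapping
 * to the transport's FC_PORTSPEED_* values.
 */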
6729
6730 /**
6731 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6732 * @shost: kernel scsi host pointer.
6733 **/
6734 static void
6735 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6736 {
6737 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6738 struct lpfc_hba *phba = vport->phba;
6739 u64 node_name;
6740
6741 spin_lock_irq(shost->host_lock);
6742
6743 if ((vport->port_state > LPFC_FLOGI) &&
6744 ((vport->fc_flag & FC_FABRIC) ||
6745 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6746 (vport->fc_flag & FC_PUBLIC_LOOP))))
6747 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6748 else
6749 /* fabric is local port if there is no F/FL_Port */
6750 node_name = 0;
6751
6752 spin_unlock_irq(shost->host_lock);
6753
6754 fc_host_fabric_name(shost) = node_name;
6755 }
6756
6757 /**
6758 * lpfc_get_stats - Return statistical information about the adapter
6759 * @shost: kernel scsi host pointer.
6760 *
6761 * Notes:
6762 * NULL is returned on error: link down, no mbox pool, SLI not active,
6763 * management I/O blocked, memory allocation failure, or mailbox error.
6764 *
6765 * Returns:
6766 * NULL for error
6767 * address of the adapter host statistics
6768 **/
6769 static struct fc_host_statistics *
6770 lpfc_get_stats(struct Scsi_Host *shost)
6771 {
6772 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6773 struct lpfc_hba *phba = vport->phba;
6774 struct lpfc_sli *psli = &phba->sli;
6775 struct fc_host_statistics *hs = &phba->link_stats;
6776 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6777 LPFC_MBOXQ_t *pmboxq;
6778 MAILBOX_t *pmb;
6779 int rc = 0;
6780
6781 /*
6782 * prevent udev from issuing mailbox commands until the port is
6783 * configured.
6784 */
6785 if (phba->link_state < LPFC_LINK_DOWN ||
6786 !phba->mbox_mem_pool ||
6787 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6788 return NULL;
6789
6790 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6791 return NULL;
6792
6793 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6794 if (!pmboxq)
6795 return NULL;
6796 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6797
6798 pmb = &pmboxq->u.mb;
6799 pmb->mbxCommand = MBX_READ_STATUS;
6800 pmb->mbxOwner = OWN_HOST;
6801 pmboxq->ctx_buf = NULL;
6802 pmboxq->vport = vport;
6803
6804 if (vport->fc_flag & FC_OFFLINE_MODE) {
6805 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6806 if (rc != MBX_SUCCESS) {
6807 mempool_free(pmboxq, phba->mbox_mem_pool);
6808 return NULL;
6809 }
6810 } else {
6811 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6812 if (rc != MBX_SUCCESS) {
6813 if (rc != MBX_TIMEOUT)
6814 mempool_free(pmboxq, phba->mbox_mem_pool);
6815 return NULL;
6816 }
6817 }
6818
6819 memset(hs, 0, sizeof (struct fc_host_statistics));
6820
6821 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6822 /*
6823 	 * The MBX_READ_STATUS returns tx_k_bytes which has to be
6824 	 * converted to words.
6825 */
6826 hs->tx_words = (uint64_t)
6827 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6828 * (uint64_t)256);
6829 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6830 hs->rx_words = (uint64_t)
6831 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6832 * (uint64_t)256);
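
	/*
	 * Worked example (editorial): the counts are in kilobytes and the
	 * transport wants 4-byte FC words, so each KB is 1024 / 4 = 256
	 * words; e.g. an xmitByteCnt of 10 (KB) is reported as 2560
	 * tx_words.
	 */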
6833
6834 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6835 pmb->mbxCommand = MBX_READ_LNK_STAT;
6836 pmb->mbxOwner = OWN_HOST;
6837 pmboxq->ctx_buf = NULL;
6838 pmboxq->vport = vport;
6839
6840 if (vport->fc_flag & FC_OFFLINE_MODE) {
6841 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6842 if (rc != MBX_SUCCESS) {
6843 mempool_free(pmboxq, phba->mbox_mem_pool);
6844 return NULL;
6845 }
6846 } else {
6847 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6848 if (rc != MBX_SUCCESS) {
6849 if (rc != MBX_TIMEOUT)
6850 mempool_free(pmboxq, phba->mbox_mem_pool);
6851 return NULL;
6852 }
6853 }
6854
6855 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6856 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6857 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6858 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6859 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6860 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6861 hs->error_frames = pmb->un.varRdLnk.crcCnt;
6862
6863 hs->link_failure_count -= lso->link_failure_count;
6864 hs->loss_of_sync_count -= lso->loss_of_sync_count;
6865 hs->loss_of_signal_count -= lso->loss_of_signal_count;
6866 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6867 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6868 hs->invalid_crc_count -= lso->invalid_crc_count;
6869 hs->error_frames -= lso->error_frames;
6870
6871 if (phba->hba_flag & HBA_FCOE_MODE) {
6872 hs->lip_count = -1;
6873 hs->nos_count = (phba->link_events >> 1);
6874 hs->nos_count -= lso->link_events;
6875 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6876 hs->lip_count = (phba->fc_eventTag >> 1);
6877 hs->lip_count -= lso->link_events;
6878 hs->nos_count = -1;
6879 } else {
6880 hs->lip_count = -1;
6881 hs->nos_count = (phba->fc_eventTag >> 1);
6882 hs->nos_count -= lso->link_events;
6883 }
6884
6885 hs->dumped_frames = -1;
6886
6887 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6888
6889 mempool_free(pmboxq, phba->mbox_mem_pool);
6890
6891 return hs;
6892 }
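
/*
 * Design note (editorial): the link error counters returned by
 * MBX_READ_LNK_STAT are cumulative, so the values reported above are
 * adjusted by the baseline snapshot kept in psli->lnk_stat_offsets.
 * lpfc_reset_stats() below refreshes that baseline (and issues a
 * MBX_READ_STATUS reset request), which is how the transport's
 * "reset statistics" operation is implemented for these counters.
 */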
6893
6894 /**
6895 * lpfc_reset_stats - Copy the adapter link stats information
6896 * @shost: kernel scsi host pointer.
6897 **/
6898 static void
6899 lpfc_reset_stats(struct Scsi_Host *shost)
6900 {
6901 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6902 struct lpfc_hba *phba = vport->phba;
6903 struct lpfc_sli *psli = &phba->sli;
6904 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6905 LPFC_MBOXQ_t *pmboxq;
6906 MAILBOX_t *pmb;
6907 int rc = 0;
6908
6909 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6910 return;
6911
6912 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6913 if (!pmboxq)
6914 return;
6915 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6916
6917 pmb = &pmboxq->u.mb;
6918 pmb->mbxCommand = MBX_READ_STATUS;
6919 pmb->mbxOwner = OWN_HOST;
6920 pmb->un.varWords[0] = 0x1; /* reset request */
6921 pmboxq->ctx_buf = NULL;
6922 pmboxq->vport = vport;
6923
6924 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6925 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6926 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6927 if (rc != MBX_SUCCESS) {
6928 mempool_free(pmboxq, phba->mbox_mem_pool);
6929 return;
6930 }
6931 } else {
6932 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6933 if (rc != MBX_SUCCESS) {
6934 if (rc != MBX_TIMEOUT)
6935 mempool_free(pmboxq, phba->mbox_mem_pool);
6936 return;
6937 }
6938 }
6939
6940 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6941 pmb->mbxCommand = MBX_READ_LNK_STAT;
6942 pmb->mbxOwner = OWN_HOST;
6943 pmboxq->ctx_buf = NULL;
6944 pmboxq->vport = vport;
6945
6946 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6947 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6948 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6949 if (rc != MBX_SUCCESS) {
6950 mempool_free(pmboxq, phba->mbox_mem_pool);
6951 return;
6952 }
6953 } else {
6954 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6955 if (rc != MBX_SUCCESS) {
6956 if (rc != MBX_TIMEOUT)
6957 mempool_free(pmboxq, phba->mbox_mem_pool);
6958 return;
6959 }
6960 }
6961
6962 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6963 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6964 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6965 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6966 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6967 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6968 lso->error_frames = pmb->un.varRdLnk.crcCnt;
6969 if (phba->hba_flag & HBA_FCOE_MODE)
6970 lso->link_events = (phba->link_events >> 1);
6971 else
6972 lso->link_events = (phba->fc_eventTag >> 1);
6973
6974 psli->stats_start = ktime_get_seconds();
6975
6976 mempool_free(pmboxq, phba->mbox_mem_pool);
6977
6978 return;
6979 }
6980
6981 /*
6982 * The LPFC driver treats linkdown handling as target loss events so there
6983 * are no sysfs handlers for link_down_tmo.
6984 */
6985
6986 /**
6987 * lpfc_get_node_by_target - Return the nodelist for a target
6988 * @starget: kernel scsi target pointer.
6989 *
6990 * Returns:
6991 * address of the node list if found
6992 * NULL target not found
6993 **/
6994 static struct lpfc_nodelist *
6995 lpfc_get_node_by_target(struct scsi_target *starget)
6996 {
6997 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
6998 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6999 struct lpfc_nodelist *ndlp;
7000
7001 spin_lock_irq(shost->host_lock);
7002 	/* Search for this mapped target ID */
7003 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7004 if (NLP_CHK_NODE_ACT(ndlp) &&
7005 ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7006 starget->id == ndlp->nlp_sid) {
7007 spin_unlock_irq(shost->host_lock);
7008 return ndlp;
7009 }
7010 }
7011 spin_unlock_irq(shost->host_lock);
7012 return NULL;
7013 }
7014
7015 /**
7016 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7017 * @starget: kernel scsi target pointer.
7018 **/
7019 static void
7020 lpfc_get_starget_port_id(struct scsi_target *starget)
7021 {
7022 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7023
7024 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7025 }
7026
7027 /**
7028 * lpfc_get_starget_node_name - Set the target node name
7029 * @starget: kernel scsi target pointer.
7030 *
7031 * Description: Set the target node name to the ndlp node name wwn or zero.
7032 **/
7033 static void
7034 lpfc_get_starget_node_name(struct scsi_target *starget)
7035 {
7036 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7037
7038 fc_starget_node_name(starget) =
7039 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7040 }
7041
7042 /**
7043 * lpfc_get_starget_port_name - Set the target port name
7044 * @starget: kernel scsi target pointer.
7045 *
7046 * Description: set the target port name to the ndlp port name wwn or zero.
7047 **/
7048 static void
7049 lpfc_get_starget_port_name(struct scsi_target *starget)
7050 {
7051 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7052
7053 fc_starget_port_name(starget) =
7054 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7055 }
7056
7057 /**
7058 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7059 * @rport: fc rport address.
7060 * @timeout: new value for dev loss tmo.
7061 *
7062 * Description:
7063 * If timeout is non-zero, set dev_loss_tmo to timeout; otherwise set
7064 * dev_loss_tmo to one.
7065 **/
7066 static void
7067 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7068 {
7069 struct lpfc_rport_data *rdata = rport->dd_data;
7070 struct lpfc_nodelist *ndlp = rdata->pnode;
7071 #if (IS_ENABLED(CONFIG_NVME_FC))
7072 struct lpfc_nvme_rport *nrport = NULL;
7073 #endif
7074
7075 if (timeout)
7076 rport->dev_loss_tmo = timeout;
7077 else
7078 rport->dev_loss_tmo = 1;
7079
7080 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
7081 dev_info(&rport->dev, "Cannot find remote node to "
7082 "set rport dev loss tmo, port_id x%x\n",
7083 rport->port_id);
7084 return;
7085 }
7086
7087 #if (IS_ENABLED(CONFIG_NVME_FC))
7088 nrport = lpfc_ndlp_get_nrport(ndlp);
7089
7090 if (nrport && nrport->remoteport)
7091 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7092 rport->dev_loss_tmo);
7093 #endif
7094 }
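
/*
 * Editorial note: the same dev_loss_tmo value is propagated to the NVMe
 * transport via nvme_fc_set_remoteport_devloss() when the node also has
 * an NVMe remote port, so SCSI and NVMe paths to the same remote node
 * age out with a consistent timeout.
 */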
7095
7096 /**
7097 * lpfc_rport_show_function - Return rport target information
7098 *
7099 * Description:
7100 * Macro that uses @field to generate a function named lpfc_show_rport_##field.
7101 *
7102 * lpfc_show_rport_##field: returns the bytes formatted in buf.
7103 * @cdev: class converted to an fc_rport.
7104 * @buf: on return contains the target_field or zero.
7105 *
7106 * Returns: size of formatted string.
7107 **/
7108 #define lpfc_rport_show_function(field, format_string, sz, cast) \
7109 static ssize_t \
7110 lpfc_show_rport_##field (struct device *dev, \
7111 struct device_attribute *attr, \
7112 char *buf) \
7113 { \
7114 struct fc_rport *rport = transport_class_to_rport(dev); \
7115 struct lpfc_rport_data *rdata = rport->hostdata; \
7116 return scnprintf(buf, sz, format_string, \
7117 (rdata->target) ? cast rdata->target->field : 0); \
7118 }
7119
7120 #define lpfc_rport_rd_attr(field, format_string, sz) \
7121 lpfc_rport_show_function(field, format_string, sz, ) \
7122 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
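
/*
 * Illustrative expansion (editorial; "example_field" is hypothetical and
 * not generated by this file): lpfc_rport_rd_attr(example_field, "%d\n", 20)
 * would expand to roughly:
 *
 *	static ssize_t
 *	lpfc_show_rport_example_field(struct device *dev,
 *				      struct device_attribute *attr, char *buf)
 *	{
 *		struct fc_rport *rport = transport_class_to_rport(dev);
 *		struct lpfc_rport_data *rdata = rport->hostdata;
 *		return scnprintf(buf, 20, "%d\n",
 *				 rdata->target ? rdata->target->example_field : 0);
 *	}
 *	static FC_RPORT_ATTR(example_field, S_IRUGO,
 *			     lpfc_show_rport_example_field, NULL)
 */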
7123
7124 /**
7125 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7126 * @fc_vport: The fc_vport whose symbolic name has been changed.
7127 *
7128 * Description:
7129 * This function is called by the transport after the @fc_vport's symbolic name
7130 * has been changed. This function re-registers the symbolic name with the
7131 * switch to propagate the change into the fabric if the vport is active.
7132 **/
7133 static void
7134 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7135 {
7136 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7137
7138 if (vport->port_state == LPFC_VPORT_READY)
7139 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7140 }
7141
7142 /**
7143 * lpfc_hba_log_verbose_init - Set hba's log verbose level
7144 * @phba: Pointer to lpfc_hba struct.
7145 * @verbose: verbose level to set.
7146 *
7147 * Called by the lpfc_get_cfgparam() routine to copy the module parameter
7148 * lpfc_log_verbose into @phba->cfg_log_verbose so that log messages are
7149 * filtered correctly before any hba port or vport is created.
7150 **/
7151 static void
7152 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7153 {
7154 phba->cfg_log_verbose = verbose;
7155 }
7156
7157 struct fc_function_template lpfc_transport_functions = {
7158 /* fixed attributes the driver supports */
7159 .show_host_node_name = 1,
7160 .show_host_port_name = 1,
7161 .show_host_supported_classes = 1,
7162 .show_host_supported_fc4s = 1,
7163 .show_host_supported_speeds = 1,
7164 .show_host_maxframe_size = 1,
7165
7166 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7167 .show_host_symbolic_name = 1,
7168
7169 /* dynamic attributes the driver supports */
7170 .get_host_port_id = lpfc_get_host_port_id,
7171 .show_host_port_id = 1,
7172
7173 .get_host_port_type = lpfc_get_host_port_type,
7174 .show_host_port_type = 1,
7175
7176 .get_host_port_state = lpfc_get_host_port_state,
7177 .show_host_port_state = 1,
7178
7179 /* active_fc4s is shown but doesn't change (thus no get function) */
7180 .show_host_active_fc4s = 1,
7181
7182 .get_host_speed = lpfc_get_host_speed,
7183 .show_host_speed = 1,
7184
7185 .get_host_fabric_name = lpfc_get_host_fabric_name,
7186 .show_host_fabric_name = 1,
7187
7188 /*
7189 * The LPFC driver treats linkdown handling as target loss events
7190 * so there are no sysfs handlers for link_down_tmo.
7191 */
7192
7193 .get_fc_host_stats = lpfc_get_stats,
7194 .reset_fc_host_stats = lpfc_reset_stats,
7195
7196 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7197 .show_rport_maxframe_size = 1,
7198 .show_rport_supported_classes = 1,
7199
7200 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7201 .show_rport_dev_loss_tmo = 1,
7202
7203 .get_starget_port_id = lpfc_get_starget_port_id,
7204 .show_starget_port_id = 1,
7205
7206 .get_starget_node_name = lpfc_get_starget_node_name,
7207 .show_starget_node_name = 1,
7208
7209 .get_starget_port_name = lpfc_get_starget_port_name,
7210 .show_starget_port_name = 1,
7211
7212 .issue_fc_host_lip = lpfc_issue_lip,
7213 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7214 .terminate_rport_io = lpfc_terminate_rport_io,
7215
7216 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7217
7218 .vport_disable = lpfc_vport_disable,
7219
7220 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7221
7222 .bsg_request = lpfc_bsg_request,
7223 .bsg_timeout = lpfc_bsg_timeout,
7224 };
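
/*
 * Editorial note: this template is what ties the driver callbacks above
 * to the FC transport class.  Elsewhere in the driver (sketch, assumed
 * usage) it is registered and installed in the Scsi_Host before
 * scsi_add_host() runs:
 *
 *	lpfc_transport_template =
 *		fc_attach_transport(&lpfc_transport_functions);
 *	shost->transportt = lpfc_transport_template;
 */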
7225
7226 struct fc_function_template lpfc_vport_transport_functions = {
7227 /* fixed attributes the driver supports */
7228 .show_host_node_name = 1,
7229 .show_host_port_name = 1,
7230 .show_host_supported_classes = 1,
7231 .show_host_supported_fc4s = 1,
7232 .show_host_supported_speeds = 1,
7233 .show_host_maxframe_size = 1,
7234
7235 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7236 .show_host_symbolic_name = 1,
7237
7238 /* dynamic attributes the driver supports */
7239 .get_host_port_id = lpfc_get_host_port_id,
7240 .show_host_port_id = 1,
7241
7242 .get_host_port_type = lpfc_get_host_port_type,
7243 .show_host_port_type = 1,
7244
7245 .get_host_port_state = lpfc_get_host_port_state,
7246 .show_host_port_state = 1,
7247
7248 /* active_fc4s is shown but doesn't change (thus no get function) */
7249 .show_host_active_fc4s = 1,
7250
7251 .get_host_speed = lpfc_get_host_speed,
7252 .show_host_speed = 1,
7253
7254 .get_host_fabric_name = lpfc_get_host_fabric_name,
7255 .show_host_fabric_name = 1,
7256
7257 /*
7258 * The LPFC driver treats linkdown handling as target loss events
7259 * so there are no sysfs handlers for link_down_tmo.
7260 */
7261
7262 .get_fc_host_stats = lpfc_get_stats,
7263 .reset_fc_host_stats = lpfc_reset_stats,
7264
7265 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7266 .show_rport_maxframe_size = 1,
7267 .show_rport_supported_classes = 1,
7268
7269 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7270 .show_rport_dev_loss_tmo = 1,
7271
7272 .get_starget_port_id = lpfc_get_starget_port_id,
7273 .show_starget_port_id = 1,
7274
7275 .get_starget_node_name = lpfc_get_starget_node_name,
7276 .show_starget_node_name = 1,
7277
7278 .get_starget_port_name = lpfc_get_starget_port_name,
7279 .show_starget_port_name = 1,
7280
7281 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7282 .terminate_rport_io = lpfc_terminate_rport_io,
7283
7284 .vport_disable = lpfc_vport_disable,
7285
7286 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7287 };
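
/*
 * Editorial note: this vport template is a trimmed copy of
 * lpfc_transport_functions above -- it omits issue_fc_host_lip,
 * dd_fcvport_size and the bsg_request/bsg_timeout hooks, since LIP,
 * vport creation and bsg management traffic are handled through the
 * physical port rather than through an NPIV vport.
 */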
7288
7289 /**
7290 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
7291 * Mode
7292 * @phba: lpfc_hba pointer.
7293 **/
7294 static void
7295 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7296 {
7297 /* If the adapter supports FCoE mode */
7298 switch (phba->pcidev->device) {
7299 case PCI_DEVICE_ID_SKYHAWK:
7300 case PCI_DEVICE_ID_SKYHAWK_VF:
7301 case PCI_DEVICE_ID_LANCER_FCOE:
7302 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7303 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7304 case PCI_DEVICE_ID_HORNET:
7305 case PCI_DEVICE_ID_TIGERSHARK:
7306 case PCI_DEVICE_ID_TOMCAT:
7307 phba->hba_flag |= HBA_FCOE_MODE;
7308 break;
7309 default:
7310 /* for others, clear the flag */
7311 phba->hba_flag &= ~HBA_FCOE_MODE;
7312 }
7313 }
7314
7315 /**
7316 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7317 * @phba: lpfc_hba pointer.
7318 **/
7319 void
7320 lpfc_get_cfgparam(struct lpfc_hba *phba)
7321 {
7322 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7323 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7324 lpfc_ns_query_init(phba, lpfc_ns_query);
7325 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7326 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7327 lpfc_cr_count_init(phba, lpfc_cr_count);
7328 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7329 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7330 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7331 lpfc_ack0_init(phba, lpfc_ack0);
7332 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7333 lpfc_topology_init(phba, lpfc_topology);
7334 lpfc_link_speed_init(phba, lpfc_link_speed);
7335 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7336 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7337 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7338 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7339 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7340 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7341 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7342 lpfc_use_msi_init(phba, lpfc_use_msi);
7343 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7344 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7345 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7346 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7347 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7348 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7349 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7350 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7351 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7352
7353 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7354 if (phba->sli_rev != LPFC_SLI_REV4)
7355 phba->cfg_EnableXLane = 0;
7356 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7357
7358 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7359 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7360 phba->cfg_oas_lun_state = 0;
7361 phba->cfg_oas_lun_status = 0;
7362 phba->cfg_oas_flags = 0;
7363 phba->cfg_oas_priority = 0;
7364 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7365 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7366 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7367 if (phba->sli_rev == LPFC_SLI_REV4)
7368 phba->cfg_poll = 0;
7369 else
7370 phba->cfg_poll = lpfc_poll;
7371
7372 /* Get the function mode */
7373 lpfc_get_hba_function_mode(phba);
7374
7375 /* BlockGuard allowed for FC only. */
7376 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7377 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7378 "0581 BlockGuard feature not supported\n");
7379 /* If set, clear the BlockGuard support param */
7380 phba->cfg_enable_bg = 0;
7381 } else if (phba->cfg_enable_bg) {
7382 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7383 }
7384
7385 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7386
7387 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7388 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7389 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7390
7391 /* Initialize first burst. Target vs Initiator are different. */
7392 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7393 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7394 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7395 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7396 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7397 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7398 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7399
7400 if (phba->sli_rev != LPFC_SLI_REV4) {
7401 /* NVME only supported on SLI4 */
7402 phba->nvmet_support = 0;
7403 phba->cfg_nvmet_mrq = 0;
7404 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7405 phba->cfg_enable_bbcr = 0;
7406 phba->cfg_xri_rebalancing = 0;
7407 } else {
7408 /* We MUST have FCP support */
7409 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7410 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7411 }
7412
7413 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7414
7415 phba->cfg_enable_pbde = 0;
7416
7417 /* A value of 0 means use the number of CPUs found in the system */
7418 if (phba->cfg_hdw_queue == 0)
7419 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7420 if (phba->cfg_irq_chann == 0)
7421 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7422 if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7423 phba->cfg_irq_chann = phba->cfg_hdw_queue;
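
	/*
	 * Worked example (editorial, hypothetical 16-CPU host): with the
	 * module defaults of 0, both cfg_hdw_queue and cfg_irq_chann become
	 * 16 here; if lpfc_irq_chann were set to 32 with lpfc_hdw_queue at
	 * 16, cfg_irq_chann would be clamped back to 16 so there is never
	 * more than one IRQ vector per hardware queue.
	 */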
7424
7425 phba->cfg_soft_wwnn = 0L;
7426 phba->cfg_soft_wwpn = 0L;
7427 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7428 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7429 lpfc_aer_support_init(phba, lpfc_aer_support);
7430 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7431 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7432 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7433 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7434 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7435 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7436 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7437 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7438 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7439
7440 return;
7441 }
7442
7443 /**
7444 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7445 * dependencies between protocols and roles.
7446 * @phba: lpfc_hba pointer.
7447 **/
7448 void
7449 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7450 {
7451 int logit = 0;
7452
7453 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7454 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7455 logit = 1;
7456 }
7457 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7458 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7459 logit = 1;
7460 }
7461 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7462 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7463 logit = 1;
7464 }
7465 if (logit)
7466 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7467 "2006 Reducing Queues - CPU limitation: "
7468 "IRQ %d HDWQ %d\n",
7469 phba->cfg_irq_chann,
7470 phba->cfg_hdw_queue);
7471
7472 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7473 phba->nvmet_support) {
7474 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7475
7476 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7477 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7478 "NVME Target PRLI ACC enable_fb ",
7479 phba->cfg_nvme_enable_fb,
7480 phba->cfg_nvmet_fb_size,
7481 LPFC_NVMET_FB_SZ_MAX);
7482
7483 if (phba->cfg_nvme_enable_fb == 0)
7484 phba->cfg_nvmet_fb_size = 0;
7485 else {
7486 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7487 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7488 }
7489
7490 if (!phba->cfg_nvmet_mrq)
7491 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7492
7493 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7494 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7495 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7496 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7497 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7498 phba->cfg_nvmet_mrq);
7499 }
7500 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7501 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7502
7503 } else {
7504 /* Not NVME Target mode. Turn off Target parameters. */
7505 phba->nvmet_support = 0;
7506 phba->cfg_nvmet_mrq = 0;
7507 phba->cfg_nvmet_fb_size = 0;
7508 }
7509 }
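
/*
 * Editorial summary: the adjustments above enforce the parameter
 * dependencies -- hardware queue and IRQ channel counts never exceed the
 * number of present CPUs, and in NVMe target mode the FCP initiator role
 * is dropped, the first-burst size is bounded by LPFC_NVMET_FB_SZ_MAX,
 * and lpfc_nvmet_mrq defaults to (and is capped by) the hardware queue
 * count.
 */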
7510
7511 /**
7512 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7513 * @vport: lpfc_vport pointer.
7514 **/
7515 void
7516 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7517 {
7518 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7519 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7520 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7521 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7522 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7523 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7524 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7525 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7526 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7527 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7528 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7529 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7530 lpfc_max_luns_init(vport, lpfc_max_luns);
7531 lpfc_scan_down_init(vport, lpfc_scan_down);
7532 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7533 return;
7534 }
7535