1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55
56 #define LPFC_DEF_DEVLOSS_TMO 30
57 #define LPFC_MIN_DEVLOSS_TMO 1
58 #define LPFC_MAX_DEVLOSS_TMO 255
59
60 #define LPFC_MAX_INFO_TMP_LEN 100
61 #define LPFC_INFO_MORE_STR "\nCould be more info...\n"
62 /*
63 * Write key size should be multiple of 4. If write key is changed
64 * make sure that library write key is also changed.
65 */
66 #define LPFC_REG_WRITE_KEY_SIZE 4
67 #define LPFC_REG_WRITE_KEY "EMLX"
68
69 const char *const trunk_errmsg[] = { /* map errcode */
70 "", /* There is no such error code at index 0*/
71 "link negotiated speed does not match existing"
72 " trunk - link was \"low\" speed",
73 "link negotiated speed does not match"
74 " existing trunk - link was \"middle\" speed",
75 "link negotiated speed does not match existing"
76 " trunk - link was \"high\" speed",
77 "Attached to non-trunking port - F_Port",
78 "Attached to non-trunking port - N_Port",
79 "FLOGI response timeout",
80 "non-FLOGI frame received",
81 "Invalid FLOGI response",
82 "Trunking initialization protocol",
83 "Trunk peer device mismatch",
84 };
85
86 /**
87 * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
88 * @incr: integer to convert.
89 * @hdw: ascii string holding converted integer plus a string terminator.
90 *
91 * Description:
92 * JEDEC Joint Electron Device Engineering Council.
93 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
94 * character string. The string is then terminated with a NULL in byte 9.
95 * Hex 0-9 becomes ascii '0' to '9'.
96 * Hex a-f becomes lowercase ascii 'a' to 'f'.
97 *
98 * Notes:
99 * Coded for 32 bit integers only.
100 **/
101 static void
102 lpfc_jedec_to_ascii(int incr, char hdw[])
103 {
104 int i, j;
105 for (i = 0; i < 8; i++) {
106 j = (incr & 0xf);
107 if (j <= 9)
108 hdw[7 - i] = 0x30 + j;
109 else
110 hdw[7 - i] = 0x61 + j - 10;
111 incr = (incr >> 4);
112 }
113 hdw[8] = 0;
114 return;
115 }
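/*
 * Illustrative example (not part of the driver): with the mapping above,
 * lpfc_jedec_to_ascii(0xDEADBEEF, hdw) produces the string "deadbeef" and
 * lpfc_jedec_to_ascii(0x00000012, hdw) produces "00000012"; each nibble is
 * rendered as '0'-'9' or lowercase 'a'-'f', most significant nibble first.
 */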
116
117 static ssize_t
118 lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
119 char *buf)
120 {
121 struct Scsi_Host *shost = class_to_shost(dev);
122 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
123 struct lpfc_hba *phba = vport->phba;
124 struct lpfc_cgn_info *cp = NULL;
125 struct lpfc_cgn_stat *cgs;
126 int len = 0;
127 int cpu;
128 u64 rcv, total;
129 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
130
131 if (phba->cgn_i)
132 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
133
134 scnprintf(tmp, sizeof(tmp),
135 "Congestion Mgmt Info: E2Eattr %d Ver %d "
136 "CMF %d cnt %d\n",
137 phba->sli4_hba.pc_sli4_params.mi_ver,
138 cp ? cp->cgn_info_version : 0,
139 phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
140
141 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
142 goto buffer_done;
143
144 if (!phba->sli4_hba.pc_sli4_params.cmf)
145 goto buffer_done;
146
147 switch (phba->cgn_init_reg_signal) {
148 case EDC_CG_SIG_WARN_ONLY:
149 scnprintf(tmp, sizeof(tmp),
150 "Register: Init: Signal:WARN ");
151 break;
152 case EDC_CG_SIG_WARN_ALARM:
153 scnprintf(tmp, sizeof(tmp),
154 "Register: Init: Signal:WARN|ALARM ");
155 break;
156 default:
157 scnprintf(tmp, sizeof(tmp),
158 "Register: Init: Signal:NONE ");
159 break;
160 }
161 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
162 goto buffer_done;
163
164 switch (phba->cgn_init_reg_fpin) {
165 case LPFC_CGN_FPIN_WARN:
166 scnprintf(tmp, sizeof(tmp),
167 "FPIN:WARN\n");
168 break;
169 case LPFC_CGN_FPIN_ALARM:
170 scnprintf(tmp, sizeof(tmp),
171 "FPIN:ALARM\n");
172 break;
173 case LPFC_CGN_FPIN_BOTH:
174 scnprintf(tmp, sizeof(tmp),
175 "FPIN:WARN|ALARM\n");
176 break;
177 default:
178 scnprintf(tmp, sizeof(tmp),
179 "FPIN:NONE\n");
180 break;
181 }
182 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
183 goto buffer_done;
184
185 switch (phba->cgn_reg_signal) {
186 case EDC_CG_SIG_WARN_ONLY:
187 scnprintf(tmp, sizeof(tmp),
188 " Current: Signal:WARN ");
189 break;
190 case EDC_CG_SIG_WARN_ALARM:
191 scnprintf(tmp, sizeof(tmp),
192 " Current: Signal:WARN|ALARM ");
193 break;
194 default:
195 scnprintf(tmp, sizeof(tmp),
196 " Current: Signal:NONE ");
197 break;
198 }
199 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
200 goto buffer_done;
201
202 switch (phba->cgn_reg_fpin) {
203 case LPFC_CGN_FPIN_WARN:
204 scnprintf(tmp, sizeof(tmp),
205 "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
206 break;
207 case LPFC_CGN_FPIN_ALARM:
208 scnprintf(tmp, sizeof(tmp),
209 "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
210 break;
211 case LPFC_CGN_FPIN_BOTH:
212 scnprintf(tmp, sizeof(tmp),
213 "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
214 break;
215 default:
216 scnprintf(tmp, sizeof(tmp),
217 "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
218 break;
219 }
220 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
221 goto buffer_done;
222
223 if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
224 switch (phba->cmf_active_mode) {
225 case LPFC_CFG_OFF:
226 scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
227 break;
228 case LPFC_CFG_MANAGED:
229 scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
230 break;
231 case LPFC_CFG_MONITOR:
232 scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
233 break;
234 default:
235 scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
236 }
237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
238 goto buffer_done;
239 }
240
241 switch (phba->cgn_p.cgn_param_mode) {
242 case LPFC_CFG_OFF:
243 scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
244 break;
245 case LPFC_CFG_MANAGED:
246 scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
247 break;
248 case LPFC_CFG_MONITOR:
249 scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
250 break;
251 default:
252 scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
253 }
254 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
255 goto buffer_done;
256
257 total = 0;
258 rcv = 0;
259 for_each_present_cpu(cpu) {
260 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
261 total += atomic64_read(&cgs->total_bytes);
262 rcv += atomic64_read(&cgs->rcv_bytes);
263 }
264
265 scnprintf(tmp, sizeof(tmp),
266 "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
267 atomic_read(&phba->cmf_busy),
268 phba->cmf_active_info, rcv, total);
269 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
270 goto buffer_done;
271
272 scnprintf(tmp, sizeof(tmp),
273 "Port_speed:%d Link_byte_cnt:%ld "
274 "Max_byte_per_interval:%ld\n",
275 lpfc_sli_port_speed_get(phba),
276 (unsigned long)phba->cmf_link_byte_count,
277 (unsigned long)phba->cmf_max_bytes_per_interval);
278 strlcat(buf, tmp, PAGE_SIZE);
279
280 buffer_done:
281 len = strnlen(buf, PAGE_SIZE);
282
283 if (unlikely(len >= (PAGE_SIZE - 1))) {
284 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
285 "6312 Catching potential buffer "
286 "overflow > PAGE_SIZE = %lu bytes\n",
287 PAGE_SIZE);
288 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
289 LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
290 }
291 return len;
292 }
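/*
 * Illustrative sysfs output of lpfc_cmf_info_show() (values are examples
 * only; exact spacing follows the format strings above):
 *   Congestion Mgmt Info: E2Eattr 1 Ver 1 CMF 1 cnt 42
 *   Register: Init: Signal:WARN|ALARM FPIN:WARN|ALARM
 *    Current: Signal:WARN FPIN:WARN ACQEcnt:0
 *   Config: Mode:Monitor IObusy:0 Info:0 Bytes: Rcv:x0 Total:x0
 *   Port_speed:32 Link_byte_cnt:0 Max_byte_per_interval:0
 */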
293
294 /**
295 * lpfc_drvr_version_show - Return the Emulex driver string with version number
296 * @dev: class unused variable.
297 * @attr: device attribute, not used.
298 * @buf: on return contains the module description text.
299 *
300 * Returns: size of formatted string.
301 **/
302 static ssize_t
303 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
304 char *buf)
305 {
306 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
307 }
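/*
 * Illustrative usage (assumed sysfs layout, not shown in this excerpt): the
 * *_show handlers in this file back scsi_host device attributes declared
 * later via DEVICE_ATTR(), so they are normally read from user space, e.g.:
 *   cat /sys/class/scsi_host/host0/<attribute_name>
 * where <attribute_name> and the host number depend on the declaration and
 * the system.
 */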
308
309 /**
310 * lpfc_enable_fip_show - Return the fip mode of the HBA
311 * @dev: class unused variable.
312 * @attr: device attribute, not used.
313 * @buf: on return contains "1" if FIP is supported, "0" otherwise.
314 *
315 * Returns: size of formatted string.
316 **/
317 static ssize_t
318 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
319 char *buf)
320 {
321 struct Scsi_Host *shost = class_to_shost(dev);
322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
323 struct lpfc_hba *phba = vport->phba;
324
325 if (phba->hba_flag & HBA_FIP_SUPPORT)
326 return scnprintf(buf, PAGE_SIZE, "1\n");
327 else
328 return scnprintf(buf, PAGE_SIZE, "0\n");
329 }
330
331 static ssize_t
332 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
333 char *buf)
334 {
335 struct Scsi_Host *shost = class_to_shost(dev);
336 struct lpfc_vport *vport = shost_priv(shost);
337 struct lpfc_hba *phba = vport->phba;
338 struct lpfc_nvmet_tgtport *tgtp;
339 struct nvme_fc_local_port *localport;
340 struct lpfc_nvme_lport *lport;
341 struct lpfc_nvme_rport *rport;
342 struct lpfc_nodelist *ndlp;
343 struct nvme_fc_remote_port *nrport;
344 struct lpfc_fc4_ctrl_stat *cstat;
345 uint64_t data1, data2, data3;
346 uint64_t totin, totout, tot;
347 char *statep;
348 int i;
349 int len = 0;
350 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
351
352 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
353 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
354 return len;
355 }
356 if (phba->nvmet_support) {
357 if (!phba->targetport) {
358 len = scnprintf(buf, PAGE_SIZE,
359 "NVME Target: x%llx is not allocated\n",
360 wwn_to_u64(vport->fc_portname.u.wwn));
361 return len;
362 }
363 /* Port state is only one of two values for now. */
364 if (phba->targetport->port_id)
365 statep = "REGISTERED";
366 else
367 statep = "INIT";
368 scnprintf(tmp, sizeof(tmp),
369 "NVME Target Enabled State %s\n",
370 statep);
371 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
372 goto buffer_done;
373
374 scnprintf(tmp, sizeof(tmp),
375 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
376 "NVME Target: lpfc",
377 phba->brd_no,
378 wwn_to_u64(vport->fc_portname.u.wwn),
379 wwn_to_u64(vport->fc_nodename.u.wwn),
380 phba->targetport->port_id);
381 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
382 goto buffer_done;
383
384 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
385 >= PAGE_SIZE)
386 goto buffer_done;
387
388 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
389 scnprintf(tmp, sizeof(tmp),
390 "LS: Rcv %08x Drop %08x Abort %08x\n",
391 atomic_read(&tgtp->rcv_ls_req_in),
392 atomic_read(&tgtp->rcv_ls_req_drop),
393 atomic_read(&tgtp->xmt_ls_abort));
394 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
395 goto buffer_done;
396
397 if (atomic_read(&tgtp->rcv_ls_req_in) !=
398 atomic_read(&tgtp->rcv_ls_req_out)) {
399 scnprintf(tmp, sizeof(tmp),
400 "Rcv LS: in %08x != out %08x\n",
401 atomic_read(&tgtp->rcv_ls_req_in),
402 atomic_read(&tgtp->rcv_ls_req_out));
403 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
404 goto buffer_done;
405 }
406
407 scnprintf(tmp, sizeof(tmp),
408 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
409 atomic_read(&tgtp->xmt_ls_rsp),
410 atomic_read(&tgtp->xmt_ls_drop),
411 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
413 goto buffer_done;
414
415 scnprintf(tmp, sizeof(tmp),
416 "LS: RSP Abort %08x xb %08x Err %08x\n",
417 atomic_read(&tgtp->xmt_ls_rsp_aborted),
418 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
419 atomic_read(&tgtp->xmt_ls_rsp_error));
420 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
421 goto buffer_done;
422
423 scnprintf(tmp, sizeof(tmp),
424 "FCP: Rcv %08x Defer %08x Release %08x "
425 "Drop %08x\n",
426 atomic_read(&tgtp->rcv_fcp_cmd_in),
427 atomic_read(&tgtp->rcv_fcp_cmd_defer),
428 atomic_read(&tgtp->xmt_fcp_release),
429 atomic_read(&tgtp->rcv_fcp_cmd_drop));
430 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
431 goto buffer_done;
432
433 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
434 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
435 scnprintf(tmp, sizeof(tmp),
436 "Rcv FCP: in %08x != out %08x\n",
437 atomic_read(&tgtp->rcv_fcp_cmd_in),
438 atomic_read(&tgtp->rcv_fcp_cmd_out));
439 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
440 goto buffer_done;
441 }
442
443 scnprintf(tmp, sizeof(tmp),
444 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
445 "drop %08x\n",
446 atomic_read(&tgtp->xmt_fcp_read),
447 atomic_read(&tgtp->xmt_fcp_read_rsp),
448 atomic_read(&tgtp->xmt_fcp_write),
449 atomic_read(&tgtp->xmt_fcp_rsp),
450 atomic_read(&tgtp->xmt_fcp_drop));
451 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
452 goto buffer_done;
453
454 scnprintf(tmp, sizeof(tmp),
455 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
456 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
457 atomic_read(&tgtp->xmt_fcp_rsp_error),
458 atomic_read(&tgtp->xmt_fcp_rsp_drop));
459 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
460 goto buffer_done;
461
462 scnprintf(tmp, sizeof(tmp),
463 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
464 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
465 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
466 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
467 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
468 goto buffer_done;
469
470 scnprintf(tmp, sizeof(tmp),
471 "ABORT: Xmt %08x Cmpl %08x\n",
472 atomic_read(&tgtp->xmt_fcp_abort),
473 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
474 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
475 goto buffer_done;
476
477 scnprintf(tmp, sizeof(tmp),
478 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
479 atomic_read(&tgtp->xmt_abort_sol),
480 atomic_read(&tgtp->xmt_abort_unsol),
481 atomic_read(&tgtp->xmt_abort_rsp),
482 atomic_read(&tgtp->xmt_abort_rsp_error));
483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
484 goto buffer_done;
485
486 scnprintf(tmp, sizeof(tmp),
487 "DELAY: ctx %08x fod %08x wqfull %08x\n",
488 atomic_read(&tgtp->defer_ctx),
489 atomic_read(&tgtp->defer_fod),
490 atomic_read(&tgtp->defer_wqfull));
491 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
492 goto buffer_done;
493
494 /* Calculate outstanding IOs */
495 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
496 tot += atomic_read(&tgtp->xmt_fcp_release);
497 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
498
499 scnprintf(tmp, sizeof(tmp),
500 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
501 "CTX Outstanding %08llx\n\n",
502 phba->sli4_hba.nvmet_xri_cnt,
503 phba->sli4_hba.nvmet_io_wait_cnt,
504 phba->sli4_hba.nvmet_io_wait_total,
505 tot);
506 strlcat(buf, tmp, PAGE_SIZE);
507 goto buffer_done;
508 }
509
510 localport = vport->localport;
511 if (!localport) {
512 len = scnprintf(buf, PAGE_SIZE,
513 "NVME Initiator x%llx is not allocated\n",
514 wwn_to_u64(vport->fc_portname.u.wwn));
515 return len;
516 }
517 lport = (struct lpfc_nvme_lport *)localport->private;
518 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
519 goto buffer_done;
520
521 scnprintf(tmp, sizeof(tmp),
522 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
523 phba->brd_no,
524 phba->sli4_hba.max_cfg_param.max_xri,
525 phba->sli4_hba.io_xri_max,
526 lpfc_sli4_get_els_iocb_cnt(phba));
527 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
528 goto buffer_done;
529
530 /* Port state is only one of two values for now. */
531 if (localport->port_id)
532 statep = "ONLINE";
533 else
534 statep = "UNKNOWN ";
535
536 scnprintf(tmp, sizeof(tmp),
537 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
538 "NVME LPORT lpfc",
539 phba->brd_no,
540 wwn_to_u64(vport->fc_portname.u.wwn),
541 wwn_to_u64(vport->fc_nodename.u.wwn),
542 localport->port_id, statep);
543 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
544 goto buffer_done;
545
546 spin_lock_irq(shost->host_lock);
547
548 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
549 nrport = NULL;
550 spin_lock(&ndlp->lock);
551 rport = lpfc_ndlp_get_nrport(ndlp);
552 if (rport)
553 nrport = rport->remoteport;
554 spin_unlock(&ndlp->lock);
555 if (!nrport)
556 continue;
557
558 /* Port state is only one of two values for now. */
559 switch (nrport->port_state) {
560 case FC_OBJSTATE_ONLINE:
561 statep = "ONLINE";
562 break;
563 case FC_OBJSTATE_UNKNOWN:
564 statep = "UNKNOWN ";
565 break;
566 default:
567 statep = "UNSUPPORTED";
568 break;
569 }
570
571 /* Tab in to show lport ownership. */
572 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
573 goto unlock_buf_done;
574 if (phba->brd_no >= 10) {
575 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
576 goto unlock_buf_done;
577 }
578
579 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
580 nrport->port_name);
581 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
582 goto unlock_buf_done;
583
584 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
585 nrport->node_name);
586 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
587 goto unlock_buf_done;
588
589 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
590 nrport->port_id);
591 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
592 goto unlock_buf_done;
593
594 /* An NVME rport can have multiple roles. */
595 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
596 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
597 goto unlock_buf_done;
598 }
599 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
600 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
601 goto unlock_buf_done;
602 }
603 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
604 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
605 goto unlock_buf_done;
606 }
607 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
608 FC_PORT_ROLE_NVME_TARGET |
609 FC_PORT_ROLE_NVME_DISCOVERY)) {
610 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
611 nrport->port_role);
612 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
613 goto unlock_buf_done;
614 }
615
616 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
617 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
618 goto unlock_buf_done;
619 }
620 spin_unlock_irq(shost->host_lock);
621
622 if (!lport)
623 goto buffer_done;
624
625 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
626 goto buffer_done;
627
628 scnprintf(tmp, sizeof(tmp),
629 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
630 atomic_read(&lport->fc4NvmeLsRequests),
631 atomic_read(&lport->fc4NvmeLsCmpls),
632 atomic_read(&lport->xmt_ls_abort));
633 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
634 goto buffer_done;
635
636 scnprintf(tmp, sizeof(tmp),
637 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
638 atomic_read(&lport->xmt_ls_err),
639 atomic_read(&lport->cmpl_ls_xb),
640 atomic_read(&lport->cmpl_ls_err));
641 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
642 goto buffer_done;
643
644 totin = 0;
645 totout = 0;
646 for (i = 0; i < phba->cfg_hdw_queue; i++) {
647 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
648 tot = cstat->io_cmpls;
649 totin += tot;
650 data1 = cstat->input_requests;
651 data2 = cstat->output_requests;
652 data3 = cstat->control_requests;
653 totout += (data1 + data2 + data3);
654 }
655 scnprintf(tmp, sizeof(tmp),
656 "Total FCP Cmpl %016llx Issue %016llx "
657 "OutIO %016llx\n",
658 totin, totout, totout - totin);
659 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
660 goto buffer_done;
661
662 scnprintf(tmp, sizeof(tmp),
663 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
664 "wqerr %08x err %08x\n",
665 atomic_read(&lport->xmt_fcp_abort),
666 atomic_read(&lport->xmt_fcp_noxri),
667 atomic_read(&lport->xmt_fcp_bad_ndlp),
668 atomic_read(&lport->xmt_fcp_qdepth),
669 atomic_read(&lport->xmt_fcp_wqerr),
670 atomic_read(&lport->xmt_fcp_err));
671 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
672 goto buffer_done;
673
674 scnprintf(tmp, sizeof(tmp),
675 "FCP CMPL: xb %08x Err %08x\n",
676 atomic_read(&lport->cmpl_fcp_xb),
677 atomic_read(&lport->cmpl_fcp_err));
678 strlcat(buf, tmp, PAGE_SIZE);
679
680 /* host_lock is already unlocked. */
681 goto buffer_done;
682
683 unlock_buf_done:
684 spin_unlock_irq(shost->host_lock);
685
686 buffer_done:
687 len = strnlen(buf, PAGE_SIZE);
688
689 if (unlikely(len >= (PAGE_SIZE - 1))) {
690 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
691 "6314 Catching potential buffer "
692 "overflow > PAGE_SIZE = %lu bytes\n",
693 PAGE_SIZE);
694 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
695 LPFC_INFO_MORE_STR,
696 sizeof(LPFC_INFO_MORE_STR) + 1);
697 }
698
699 return len;
700 }
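/*
 * Illustrative initiator-mode output of lpfc_nvme_info_show() (example
 * values only, abbreviated):
 *   NVME Initiator Enabled
 *   XRI Dist lpfc0 Total 6144 IO 5632 ELS 256
 *   NVME LPORT lpfc0 WWPN x10000090fa000001 WWNN x20000090fa000001 DID x010100 ONLINE
 *   NVME RPORT       WWPN x201700a098000001 WWNN x201600a098000001 DID x010900 TARGET ONLINE
 *
 *   NVME Statistics
 *   LS: Xmt 0000000010 Cmpl 0000000010 Abort 00000000
 *   ...
 */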
701
702 static ssize_t
703 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
704 char *buf)
705 {
706 struct Scsi_Host *shost = class_to_shost(dev);
707 struct lpfc_vport *vport = shost_priv(shost);
708 struct lpfc_hba *phba = vport->phba;
709 int len;
710 struct lpfc_fc4_ctrl_stat *cstat;
711 u64 data1, data2, data3;
712 u64 tot, totin, totout;
713 int i;
714 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
715
716 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
717 (phba->sli_rev != LPFC_SLI_REV4))
718 return 0;
719
720 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
721
722 totin = 0;
723 totout = 0;
724 for (i = 0; i < phba->cfg_hdw_queue; i++) {
725 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
726 tot = cstat->io_cmpls;
727 totin += tot;
728 data1 = cstat->input_requests;
729 data2 = cstat->output_requests;
730 data3 = cstat->control_requests;
731 totout += (data1 + data2 + data3);
732
733 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
734 "IO %016llx ", i, data1, data2, data3);
735 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
736 goto buffer_done;
737
738 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
739 tot, ((data1 + data2 + data3) - tot));
740 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
741 goto buffer_done;
742 }
743 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
744 "OutIO %016llx\n", totin, totout, totout - totin);
745 strlcat(buf, tmp, PAGE_SIZE);
746
747 buffer_done:
748 len = strnlen(buf, PAGE_SIZE);
749
750 return len;
751 }
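/*
 * Illustrative output of lpfc_scsi_stat_show() (example values only):
 *   SCSI HDWQ Statistics
 *   HDWQ (0): Rd 0000000000000400 Wr 0000000000000200 IO 0000000000000010 Cmpl 0000000000000600 OutIO 0000000000000010
 *   Total FCP Cmpl 0000000000000600 Issue 0000000000000610 OutIO 0000000000000010
 */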
752
753 static ssize_t
754 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
755 char *buf)
756 {
757 struct Scsi_Host *shost = class_to_shost(dev);
758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
759 struct lpfc_hba *phba = vport->phba;
760
761 if (phba->cfg_enable_bg) {
762 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
763 return scnprintf(buf, PAGE_SIZE,
764 "BlockGuard Enabled\n");
765 else
766 return scnprintf(buf, PAGE_SIZE,
767 "BlockGuard Not Supported\n");
768 } else
769 return scnprintf(buf, PAGE_SIZE,
770 "BlockGuard Disabled\n");
771 }
772
773 static ssize_t
774 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
775 char *buf)
776 {
777 struct Scsi_Host *shost = class_to_shost(dev);
778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
779 struct lpfc_hba *phba = vport->phba;
780
781 return scnprintf(buf, PAGE_SIZE, "%llu\n",
782 (unsigned long long)phba->bg_guard_err_cnt);
783 }
784
785 static ssize_t
786 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
787 char *buf)
788 {
789 struct Scsi_Host *shost = class_to_shost(dev);
790 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
791 struct lpfc_hba *phba = vport->phba;
792
793 return scnprintf(buf, PAGE_SIZE, "%llu\n",
794 (unsigned long long)phba->bg_apptag_err_cnt);
795 }
796
797 static ssize_t
798 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
799 char *buf)
800 {
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
803 struct lpfc_hba *phba = vport->phba;
804
805 return scnprintf(buf, PAGE_SIZE, "%llu\n",
806 (unsigned long long)phba->bg_reftag_err_cnt);
807 }
808
809 /**
810 * lpfc_info_show - Return some pci info about the host in ascii
811 * @dev: class converted to a Scsi_host structure.
812 * @attr: device attribute, not used.
813 * @buf: on return contains the formatted text from lpfc_info().
814 *
815 * Returns: size of formatted string.
816 **/
817 static ssize_t
818 lpfc_info_show(struct device *dev, struct device_attribute *attr,
819 char *buf)
820 {
821 struct Scsi_Host *host = class_to_shost(dev);
822
823 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
824 }
825
826 /**
827 * lpfc_serialnum_show - Return the hba serial number in ascii
828 * @dev: class converted to a Scsi_host structure.
829 * @attr: device attribute, not used.
830 * @buf: on return contains the formatted text serial number.
831 *
832 * Returns: size of formatted string.
833 **/
834 static ssize_t
835 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
836 char *buf)
837 {
838 struct Scsi_Host *shost = class_to_shost(dev);
839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
840 struct lpfc_hba *phba = vport->phba;
841
842 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
843 }
844
845 /**
846 * lpfc_temp_sensor_show - Return the temperature sensor level
847 * @dev: class converted to a Scsi_host structure.
848 * @attr: device attribute, not used.
849 * @buf: on return contains the formatted support level.
850 *
851 * Description:
852 * Returns a number indicating the temperature sensor level currently
853 * supported, zero or one in ascii.
854 *
855 * Returns: size of formatted string.
856 **/
857 static ssize_t
858 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
859 char *buf)
860 {
861 struct Scsi_Host *shost = class_to_shost(dev);
862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
863 struct lpfc_hba *phba = vport->phba;
864 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
865 }
866
867 /**
868 * lpfc_modeldesc_show - Return the model description of the hba
869 * @dev: class converted to a Scsi_host structure.
870 * @attr: device attribute, not used.
871 * @buf: on return contains the scsi vpd model description.
872 *
873 * Returns: size of formatted string.
874 **/
875 static ssize_t
876 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
877 char *buf)
878 {
879 struct Scsi_Host *shost = class_to_shost(dev);
880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
881 struct lpfc_hba *phba = vport->phba;
882
883 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
884 }
885
886 /**
887 * lpfc_modelname_show - Return the model name of the hba
888 * @dev: class converted to a Scsi_host structure.
889 * @attr: device attribute, not used.
890 * @buf: on return contains the scsi vpd model name.
891 *
892 * Returns: size of formatted string.
893 **/
894 static ssize_t
895 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
896 char *buf)
897 {
898 struct Scsi_Host *shost = class_to_shost(dev);
899 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
900 struct lpfc_hba *phba = vport->phba;
901
902 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
903 }
904
905 /**
906 * lpfc_programtype_show - Return the program type of the hba
907 * @dev: class converted to a Scsi_host structure.
908 * @attr: device attribute, not used.
909 * @buf: on return contains the scsi vpd program type.
910 *
911 * Returns: size of formatted string.
912 **/
913 static ssize_t
914 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
915 char *buf)
916 {
917 struct Scsi_Host *shost = class_to_shost(dev);
918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
919 struct lpfc_hba *phba = vport->phba;
920
921 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
922 }
923
924 /**
925 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
926 * @dev: class converted to a Scsi_host structure.
927 * @attr: device attribute, not used.
928 * @buf: on return contains the Menlo Maintenance sli flag.
929 *
930 * Returns: size of formatted string.
931 **/
932 static ssize_t
933 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
934 {
935 struct Scsi_Host *shost = class_to_shost(dev);
936 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
937 struct lpfc_hba *phba = vport->phba;
938
939 return scnprintf(buf, PAGE_SIZE, "%d\n",
940 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
941 }
942
943 /**
944 * lpfc_vportnum_show - Return the port number in ascii of the hba
945 * @dev: class converted to a Scsi_host structure.
946 * @attr: device attribute, not used.
947 * @buf: on return contains scsi vpd program type.
948 *
949 * Returns: size of formatted string.
950 **/
951 static ssize_t
952 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
953 char *buf)
954 {
955 struct Scsi_Host *shost = class_to_shost(dev);
956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
957 struct lpfc_hba *phba = vport->phba;
958
959 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
960 }
961
962 /**
963 * lpfc_fwrev_show - Return the firmware rev running in the hba
964 * @dev: class converted to a Scsi_host structure.
965 * @attr: device attribute, not used.
966 * @buf: on return contains the scsi vpd program type.
967 *
968 * Returns: size of formatted string.
969 **/
970 static ssize_t
971 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
972 char *buf)
973 {
974 struct Scsi_Host *shost = class_to_shost(dev);
975 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
976 struct lpfc_hba *phba = vport->phba;
977 uint32_t if_type;
978 uint8_t sli_family;
979 char fwrev[FW_REV_STR_SIZE];
980 int len;
981
982 lpfc_decode_firmware_rev(phba, fwrev, 1);
983 if_type = phba->sli4_hba.pc_sli4_params.if_type;
984 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
985
986 if (phba->sli_rev < LPFC_SLI_REV4)
987 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
988 fwrev, phba->sli_rev);
989 else
990 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
991 fwrev, phba->sli_rev, if_type, sli_family);
992
993 return len;
994 }
995
996 /**
997 * lpfc_hdw_show - Return the jedec information about the hba
998 * @dev: class converted to a Scsi_host structure.
999 * @attr: device attribute, not used.
1000 * @buf: on return contains the scsi vpd program type.
1001 *
1002 * Returns: size of formatted string.
1003 **/
1004 static ssize_t
1005 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
1006 {
1007 char hdw[9];
1008 struct Scsi_Host *shost = class_to_shost(dev);
1009 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1010 struct lpfc_hba *phba = vport->phba;
1011 lpfc_vpd_t *vp = &phba->vpd;
1012
1013 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
1014 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
1015 vp->rev.smRev, vp->rev.smFwRev);
1016 }
1017
1018 /**
1019 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
1020 * @dev: class converted to a Scsi_host structure.
1021 * @attr: device attribute, not used.
1022 * @buf: on return contains the ROM and FCode ascii strings.
1023 *
1024 * Returns: size of formatted string.
1025 **/
1026 static ssize_t
1027 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
1028 char *buf)
1029 {
1030 struct Scsi_Host *shost = class_to_shost(dev);
1031 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1032 struct lpfc_hba *phba = vport->phba;
1033 char fwrev[FW_REV_STR_SIZE];
1034
1035 if (phba->sli_rev < LPFC_SLI_REV4)
1036 return scnprintf(buf, PAGE_SIZE, "%s\n",
1037 phba->OptionROMVersion);
1038
1039 lpfc_decode_firmware_rev(phba, fwrev, 1);
1040 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
1041 }
1042
1043 /**
1044 * lpfc_link_state_show - Return the link state of the port
1045 * @dev: class converted to a Scsi_host structure.
1046 * @attr: device attribute, not used.
1047 * @buf: on return contains text describing the state of the link.
1048 *
1049 * Notes:
1050 * The switch statement has no default so zero will be returned.
1051 *
1052 * Returns: size of formatted string.
1053 **/
1054 static ssize_t
1055 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
1056 char *buf)
1057 {
1058 struct Scsi_Host *shost = class_to_shost(dev);
1059 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1060 struct lpfc_hba *phba = vport->phba;
1061 int len = 0;
1062
1063 switch (phba->link_state) {
1064 case LPFC_LINK_UNKNOWN:
1065 case LPFC_WARM_START:
1066 case LPFC_INIT_START:
1067 case LPFC_INIT_MBX_CMDS:
1068 case LPFC_LINK_DOWN:
1069 case LPFC_HBA_ERROR:
1070 if (phba->hba_flag & LINK_DISABLED)
1071 len += scnprintf(buf + len, PAGE_SIZE-len,
1072 "Link Down - User disabled\n");
1073 else
1074 len += scnprintf(buf + len, PAGE_SIZE-len,
1075 "Link Down\n");
1076 break;
1077 case LPFC_LINK_UP:
1078 case LPFC_CLEAR_LA:
1079 case LPFC_HBA_READY:
1080 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
1081
1082 switch (vport->port_state) {
1083 case LPFC_LOCAL_CFG_LINK:
1084 len += scnprintf(buf + len, PAGE_SIZE-len,
1085 "Configuring Link\n");
1086 break;
1087 case LPFC_FDISC:
1088 case LPFC_FLOGI:
1089 case LPFC_FABRIC_CFG_LINK:
1090 case LPFC_NS_REG:
1091 case LPFC_NS_QRY:
1092 case LPFC_BUILD_DISC_LIST:
1093 case LPFC_DISC_AUTH:
1094 len += scnprintf(buf + len, PAGE_SIZE - len,
1095 "Discovery\n");
1096 break;
1097 case LPFC_VPORT_READY:
1098 len += scnprintf(buf + len, PAGE_SIZE - len,
1099 "Ready\n");
1100 break;
1101
1102 case LPFC_VPORT_FAILED:
1103 len += scnprintf(buf + len, PAGE_SIZE - len,
1104 "Failed\n");
1105 break;
1106
1107 case LPFC_VPORT_UNKNOWN:
1108 len += scnprintf(buf + len, PAGE_SIZE - len,
1109 "Unknown\n");
1110 break;
1111 }
1112 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
1113 len += scnprintf(buf + len, PAGE_SIZE-len,
1114 " Menlo Maint Mode\n");
1115 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
1116 if (vport->fc_flag & FC_PUBLIC_LOOP)
1117 len += scnprintf(buf + len, PAGE_SIZE-len,
1118 " Public Loop\n");
1119 else
1120 len += scnprintf(buf + len, PAGE_SIZE-len,
1121 " Private Loop\n");
1122 } else {
1123 if (vport->fc_flag & FC_FABRIC)
1124 len += scnprintf(buf + len, PAGE_SIZE-len,
1125 " Fabric\n");
1126 else
1127 len += scnprintf(buf + len, PAGE_SIZE-len,
1128 " Point-2-Point\n");
1129 }
1130 }
1131
1132 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1133 ((bf_get(lpfc_sli_intf_if_type,
1134 &phba->sli4_hba.sli_intf) ==
1135 LPFC_SLI_INTF_IF_TYPE_6))) {
1136 struct lpfc_trunk_link link = phba->trunk_link;
1137
1138 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
1139 len += scnprintf(buf + len, PAGE_SIZE - len,
1140 "Trunk port 0: Link %s %s\n",
1141 (link.link0.state == LPFC_LINK_UP) ?
1142 "Up" : "Down. ",
1143 trunk_errmsg[link.link0.fault]);
1144
1145 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
1146 len += scnprintf(buf + len, PAGE_SIZE - len,
1147 "Trunk port 1: Link %s %s\n",
1148 (link.link1.state == LPFC_LINK_UP) ?
1149 "Up" : "Down. ",
1150 trunk_errmsg[link.link1.fault]);
1151
1152 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
1153 len += scnprintf(buf + len, PAGE_SIZE - len,
1154 "Trunk port 2: Link %s %s\n",
1155 (link.link2.state == LPFC_LINK_UP) ?
1156 "Up" : "Down. ",
1157 trunk_errmsg[link.link2.fault]);
1158
1159 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
1160 len += scnprintf(buf + len, PAGE_SIZE - len,
1161 "Trunk port 3: Link %s %s\n",
1162 (link.link3.state == LPFC_LINK_UP) ?
1163 "Up" : "Down. ",
1164 trunk_errmsg[link.link3.fault]);
1165
1166 }
1167
1168 return len;
1169 }
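/*
 * Illustrative strings returned by lpfc_link_state_show() (exact indentation
 * follows the format strings above):
 *   Link Down
 *   Link Up - Ready
 *      Fabric
 *   Link Up - Discovery
 *      Private Loop
 */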
1170
1171 /**
1172 * lpfc_sli4_protocol_show - Return the link protocol in use on the HBA
1173 * @dev: class unused variable.
1174 * @attr: device attribute, not used.
1175 * @buf: on return contains the protocol name: "fc", "fcoe", or "unknown".
1176 *
1177 * Returns: size of formatted string.
1178 **/
1179 static ssize_t
1180 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1181 char *buf)
1182 {
1183 struct Scsi_Host *shost = class_to_shost(dev);
1184 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1185 struct lpfc_hba *phba = vport->phba;
1186
1187 if (phba->sli_rev < LPFC_SLI_REV4)
1188 return scnprintf(buf, PAGE_SIZE, "fc\n");
1189
1190 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1191 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1192 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1193 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1194 return scnprintf(buf, PAGE_SIZE, "fc\n");
1195 }
1196 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1197 }
1198
1199 /**
1200 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1201 * (OAS) is supported.
1202 * @dev: class unused variable.
1203 * @attr: device attribute, not used.
1204 * @buf: on return contains "1" if OAS is supported, "0" otherwise.
1205 *
1206 * Returns: size of formatted string.
1207 **/
1208 static ssize_t
1209 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1210 char *buf)
1211 {
1212 struct Scsi_Host *shost = class_to_shost(dev);
1213 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1214 struct lpfc_hba *phba = vport->phba;
1215
1216 return scnprintf(buf, PAGE_SIZE, "%d\n",
1217 phba->sli4_hba.pc_sli4_params.oas_supported);
1218 }
1219
1220 /**
1221 * lpfc_link_state_store - Transition the link_state on an HBA port
1222 * @dev: class device that is converted into a Scsi_host.
1223 * @attr: device attribute, not used.
1224 * @buf: one or more lpfc_polling_flags values.
1225 * @count: not used.
1226 *
1227 * Returns:
1228 * -EINVAL if the buffer is not "up" or "down"
1229 * return from link state change function if non-zero
1230 * length of the buf on success
1231 **/
1232 static ssize_t
1233 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1234 const char *buf, size_t count)
1235 {
1236 struct Scsi_Host *shost = class_to_shost(dev);
1237 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1238 struct lpfc_hba *phba = vport->phba;
1239
1240 int status = -EINVAL;
1241
1242 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1243 (phba->link_state == LPFC_LINK_DOWN))
1244 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1245 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1246 (phba->link_state >= LPFC_LINK_UP))
1247 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1248
1249 if (status == 0)
1250 return strlen(buf);
1251 else
1252 return status;
1253 }
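/*
 * Illustrative usage (assuming this store handler is bound to a writable
 * "link_state" scsi_host attribute elsewhere in this file):
 *   echo up   > /sys/class/scsi_host/host0/link_state
 *   echo down > /sys/class/scsi_host/host0/link_state
 */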
1254
1255 /**
1256 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
1257 * @dev: class device that is converted into a Scsi_host.
1258 * @attr: device attribute, not used.
1259 * @buf: on return contains the sum of fc mapped and unmapped.
1260 *
1261 * Description:
1262 * Returns the ascii text number of the sum of the fc mapped and unmapped
1263 * vport counts.
1264 *
1265 * Returns: size of formatted string.
1266 **/
1267 static ssize_t
1268 lpfc_num_discovered_ports_show(struct device *dev,
1269 struct device_attribute *attr, char *buf)
1270 {
1271 struct Scsi_Host *shost = class_to_shost(dev);
1272 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1273
1274 return scnprintf(buf, PAGE_SIZE, "%d\n",
1275 vport->fc_map_cnt + vport->fc_unmap_cnt);
1276 }
1277
1278 /**
1279 * lpfc_issue_lip - Misnomer, name carried over from long ago
1280 * @shost: Scsi_Host pointer.
1281 *
1282 * Description:
1283 * Bring the link down gracefully then re-init the link. The firmware will
1284 * re-init the Fibre Channel interface as required. Does not issue a LIP.
1285 *
1286 * Returns:
1287 * -EPERM port offline or management commands are being blocked
1288 * -ENOMEM cannot allocate memory for the mailbox command
1289 * -EIO error sending the mailbox command
1290 * zero for success
1291 **/
1292 static int
1293 lpfc_issue_lip(struct Scsi_Host *shost)
1294 {
1295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1296 struct lpfc_hba *phba = vport->phba;
1297 LPFC_MBOXQ_t *pmboxq;
1298 int mbxstatus = MBXERR_ERROR;
1299
1300 /*
1301 * If the link is offline, disabled or BLOCK_MGMT_IO
1302 * it doesn't make any sense to allow issue_lip
1303 */
1304 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1305 (phba->hba_flag & LINK_DISABLED) ||
1306 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1307 return -EPERM;
1308
1309 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1310
1311 if (!pmboxq)
1312 return -ENOMEM;
1313
1314 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1315 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1316 pmboxq->u.mb.mbxOwner = OWN_HOST;
1317
1318 if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
1319 vport->fc_flag &= ~FC_PT2PT_NO_NVME;
1320
1321 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1322
1323 if ((mbxstatus == MBX_SUCCESS) &&
1324 (pmboxq->u.mb.mbxStatus == 0 ||
1325 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1326 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1327 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1328 phba->cfg_link_speed);
1329 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1330 phba->fc_ratov * 2);
1331 if ((mbxstatus == MBX_SUCCESS) &&
1332 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1334 "2859 SLI authentication is required "
1335 "for INIT_LINK but has not done yet\n");
1336 }
1337
1338 lpfc_set_loopback_flag(phba);
1339 if (mbxstatus != MBX_TIMEOUT)
1340 mempool_free(pmboxq, phba->mbox_mem_pool);
1341
1342 if (mbxstatus == MBXERR_ERROR)
1343 return -EIO;
1344
1345 return 0;
1346 }
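/*
 * Note: despite the name, no LIP is issued here. This routine is intended as
 * the FC transport's issue_lip handler (wired up outside this excerpt), so on
 * a typical system it is triggered from user space with something like
 *   echo 1 > /sys/class/fc_host/host0/issue_lip
 * which bounces the link via MBX_DOWN_LINK followed by lpfc_init_link().
 */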
1347
1348 int
1349 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1350 {
1351 int cnt = 0;
1352
1353 spin_lock_irq(lock);
1354 while (!list_empty(q)) {
1355 spin_unlock_irq(lock);
1356 msleep(20);
1357 if (cnt++ > 250) { /* 5 secs */
1358 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1359 "0466 Outstanding IO when "
1360 "bringing Adapter offline\n");
1361 return 0;
1362 }
1363 spin_lock_irq(lock);
1364 }
1365 spin_unlock_irq(lock);
1366 return 1;
1367 }
1368
1369 /**
1370 * lpfc_do_offline - Issues a mailbox command to bring the link down
1371 * @phba: lpfc_hba pointer.
1372 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1373 *
1374 * Notes:
1375 * Assumes any error from lpfc_do_offline() will be negative.
1376 * Can wait up to 5 seconds for the port ring buffers count
1377 * to reach zero, prints a warning if it is not zero and continues.
1378 * lpfc_workq_post_event() returns a non-zero return code if call fails.
1379 *
1380 * Returns:
1381 * -EIO error posting the event
1382 * zero for success
1383 **/
1384 static int
1385 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1386 {
1387 struct completion online_compl;
1388 struct lpfc_queue *qp = NULL;
1389 struct lpfc_sli_ring *pring;
1390 struct lpfc_sli *psli;
1391 int status = 0;
1392 int i;
1393 int rc;
1394
1395 init_completion(&online_compl);
1396 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1397 LPFC_EVT_OFFLINE_PREP);
1398 if (rc == 0)
1399 return -ENOMEM;
1400
1401 wait_for_completion(&online_compl);
1402
1403 if (status != 0)
1404 return -EIO;
1405
1406 psli = &phba->sli;
1407
1408 /*
1409 * If freeing of the queues has already started, don't access them.
1410 * Otherwise set FREE_WAIT to indicate that queues are being used
1411 * to hold the freeing process until we finish.
1412 */
1413 spin_lock_irq(&phba->hbalock);
1414 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1415 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1416 } else {
1417 spin_unlock_irq(&phba->hbalock);
1418 goto skip_wait;
1419 }
1420 spin_unlock_irq(&phba->hbalock);
1421
1422 /* Wait a little for things to settle down, but not
1423 * long enough for dev loss timeout to expire.
1424 */
1425 if (phba->sli_rev != LPFC_SLI_REV4) {
1426 for (i = 0; i < psli->num_rings; i++) {
1427 pring = &psli->sli3_ring[i];
1428 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1429 &phba->hbalock))
1430 goto out;
1431 }
1432 } else {
1433 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1434 pring = qp->pring;
1435 if (!pring)
1436 continue;
1437 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1438 &pring->ring_lock))
1439 goto out;
1440 }
1441 }
1442 out:
1443 spin_lock_irq(&phba->hbalock);
1444 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1445 spin_unlock_irq(&phba->hbalock);
1446
1447 skip_wait:
1448 init_completion(&online_compl);
1449 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1450 if (rc == 0)
1451 return -ENOMEM;
1452
1453 wait_for_completion(&online_compl);
1454
1455 if (status != 0)
1456 return -EIO;
1457
1458 return 0;
1459 }
1460
1461 /**
1462 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1463 * @phba: lpfc_hba pointer.
1464 *
1465 * Description:
1466 * Issues a PCI secondary bus reset for the phba->pcidev.
1467 *
1468 * Notes:
1469 * First walks the bus_list to ensure only PCI devices with Emulex
1470 * vendor id, device ids that support hot reset, only one occurrence
1471 * of function 0, and all ports on the bus are in offline mode to ensure the
1472 * hot reset only affects one valid HBA.
1473 *
1474 * Returns:
1475 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1476 * -ENODEV, NULL ptr to pcidev
1477 * -EBADSLT, detected invalid device
1478 * -EBUSY, port is not in offline state
1479 * 0, successful
1480 */
1481 static int
1482 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1483 {
1484 struct pci_dev *pdev = phba->pcidev;
1485 struct Scsi_Host *shost = NULL;
1486 struct lpfc_hba *phba_other = NULL;
1487 struct pci_dev *ptr = NULL;
1488 int res;
1489
1490 if (phba->cfg_enable_hba_reset != 2)
1491 return -ENOTSUPP;
1492
1493 if (!pdev) {
1494 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1495 return -ENODEV;
1496 }
1497
1498 res = lpfc_check_pci_resettable(phba);
1499 if (res)
1500 return res;
1501
1502 /* Walk the list of devices on the pci_dev's bus */
1503 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1504 /* Check port is offline */
1505 shost = pci_get_drvdata(ptr);
1506 if (shost) {
1507 phba_other =
1508 ((struct lpfc_vport *)shost->hostdata)->phba;
1509 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1510 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1511 "8349 WWPN = 0x%02x%02x%02x%02x"
1512 "%02x%02x%02x%02x is not "
1513 "offline!\n",
1514 phba_other->wwpn[0],
1515 phba_other->wwpn[1],
1516 phba_other->wwpn[2],
1517 phba_other->wwpn[3],
1518 phba_other->wwpn[4],
1519 phba_other->wwpn[5],
1520 phba_other->wwpn[6],
1521 phba_other->wwpn[7]);
1522 return -EBUSY;
1523 }
1524 }
1525 }
1526
1527 /* Issue PCI bus reset */
1528 res = pci_reset_bus(pdev);
1529 if (res) {
1530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1531 "8350 PCI reset bus failed: %d\n", res);
1532 }
1533
1534 return res;
1535 }
1536
1537 /**
1538 * lpfc_selective_reset - Offline then onlines the port
1539 * @phba: lpfc_hba pointer.
1540 *
1541 * Description:
1542 * If the port is configured to allow a reset then the hba is brought
1543 * offline then online.
1544 *
1545 * Notes:
1546 * Assumes any error from lpfc_do_offline() will be negative.
1547 * Do not make this function static.
1548 *
1549 * Returns:
1550 * lpfc_do_offline() return code if not zero
1551 * -EIO reset not configured or error posting the event
1552 * zero for success
1553 **/
1554 int
1555 lpfc_selective_reset(struct lpfc_hba *phba)
1556 {
1557 struct completion online_compl;
1558 int status = 0;
1559 int rc;
1560
1561 if (!phba->cfg_enable_hba_reset)
1562 return -EACCES;
1563
1564 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1565 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1566
1567 if (status != 0)
1568 return status;
1569 }
1570
1571 init_completion(&online_compl);
1572 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1573 LPFC_EVT_ONLINE);
1574 if (rc == 0)
1575 return -ENOMEM;
1576
1577 wait_for_completion(&online_compl);
1578
1579 if (status != 0)
1580 return -EIO;
1581
1582 return 0;
1583 }
1584
1585 /**
1586 * lpfc_issue_reset - Selectively resets an adapter
1587 * @dev: class device that is converted into a Scsi_host.
1588 * @attr: device attribute, not used.
1589 * @buf: containing the string "selective".
1590 * @count: unused variable.
1591 *
1592 * Description:
1593 * If the buf contains the string "selective" then lpfc_selective_reset()
1594 * is called to perform the reset.
1595 *
1596 * Notes:
1597 * Assumes any error from lpfc_selective_reset() will be negative.
1598 * If lpfc_selective_reset() returns zero then the length of the buffer
1599 * is returned which indicates success
1600 *
1601 * Returns:
1602 * -EINVAL if the buffer does not contain the string "selective"
1603 * length of buf if the lpfc_selective_reset() call succeeds
1604 * return value of lpfc_selective_reset() if the call fails
1605 **/
1606 static ssize_t
1607 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1608 const char *buf, size_t count)
1609 {
1610 struct Scsi_Host *shost = class_to_shost(dev);
1611 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1612 struct lpfc_hba *phba = vport->phba;
1613 int status = -EINVAL;
1614
1615 if (!phba->cfg_enable_hba_reset)
1616 return -EACCES;
1617
1618 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1619 status = phba->lpfc_selective_reset(phba);
1620
1621 if (status == 0)
1622 return strlen(buf);
1623 else
1624 return status;
1625 }
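/*
 * Illustrative usage (assuming this handler backs a write-only "issue_reset"
 * scsi_host attribute declared elsewhere in this file):
 *   echo selective > /sys/class/scsi_host/host0/issue_reset
 * Any string other than "selective" is rejected with -EINVAL.
 */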
1626
1627 /**
1628 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
1629 * @phba: lpfc_hba pointer.
1630 *
1631 * Description:
1632 * SLI4 interface type-2 device to wait on the sliport status register for
1633 * the readiness after performing a firmware reset.
1634 *
1635 * Returns:
1636 * zero for success, -EPERM when port does not have privilege to perform the
1637 * reset, -EIO when port timeout from recovering from the reset.
1638 *
1639 * Note:
1640 * As the caller will interpret the return code by value, be careful in making
1641 * change or addition to return codes.
1642 **/
1643 int
1644 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1645 {
1646 struct lpfc_register portstat_reg = {0};
1647 int i;
1648
1649 msleep(100);
1650 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1651 &portstat_reg.word0))
1652 return -EIO;
1653
1654 /* verify if privileged for the request operation */
1655 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1656 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1657 return -EPERM;
1658
1659 /* wait for the SLI port firmware ready after firmware reset */
1660 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1661 msleep(10);
1662 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1663 &portstat_reg.word0))
1664 continue;
1665 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1666 continue;
1667 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1668 continue;
1669 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1670 continue;
1671 break;
1672 }
1673
1674 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1675 return 0;
1676 else
1677 return -EIO;
1678 }
1679
1680 /**
1681 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1682 * @phba: lpfc_hba pointer.
1683 * @opcode: The sli4 config command opcode.
1684 *
1685 * Description:
1686 * Request SLI4 interface type-2 device to perform a physical register set
1687 * access.
1688 *
1689 * Returns:
1690 * zero for success
1691 **/
1692 static ssize_t
1693 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1694 {
1695 struct completion online_compl;
1696 struct pci_dev *pdev = phba->pcidev;
1697 uint32_t before_fc_flag;
1698 uint32_t sriov_nr_virtfn;
1699 uint32_t reg_val;
1700 int status = 0, rc = 0;
1701 int job_posted = 1, sriov_err;
1702
1703 if (!phba->cfg_enable_hba_reset)
1704 return -EACCES;
1705
1706 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1707 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1708 LPFC_SLI_INTF_IF_TYPE_2))
1709 return -EPERM;
1710
1711 /* Keep state if we need to restore back */
1712 before_fc_flag = phba->pport->fc_flag;
1713 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1714
1715 if (opcode == LPFC_FW_DUMP) {
1716 init_completion(&online_compl);
1717 phba->fw_dump_cmpl = &online_compl;
1718 } else {
1719 /* Disable SR-IOV virtual functions if enabled */
1720 if (phba->cfg_sriov_nr_virtfn) {
1721 pci_disable_sriov(pdev);
1722 phba->cfg_sriov_nr_virtfn = 0;
1723 }
1724
1725 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1726
1727 if (status != 0)
1728 return status;
1729
1730 /* wait for the device to be quiesced before firmware reset */
1731 msleep(100);
1732 }
1733
1734 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1735 LPFC_CTL_PDEV_CTL_OFFSET);
1736
1737 if (opcode == LPFC_FW_DUMP)
1738 reg_val |= LPFC_FW_DUMP_REQUEST;
1739 else if (opcode == LPFC_FW_RESET)
1740 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1741 else if (opcode == LPFC_DV_RESET)
1742 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1743
1744 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1745 LPFC_CTL_PDEV_CTL_OFFSET);
1746 /* flush */
1747 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1748
1749 /* delay driver action following IF_TYPE_2 reset */
1750 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1751
1752 if (rc == -EPERM) {
1753 /* no privilege for reset */
1754 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1755 "3150 No privilege to perform the requested "
1756 "access: x%x\n", reg_val);
1757 } else if (rc == -EIO) {
1758 /* reset failed, there is nothing more we can do */
1759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1760 "3153 Fail to perform the requested "
1761 "access: x%x\n", reg_val);
1762 if (phba->fw_dump_cmpl)
1763 phba->fw_dump_cmpl = NULL;
1764 return rc;
1765 }
1766
1767 /* keep the original port state */
1768 if (before_fc_flag & FC_OFFLINE_MODE) {
1769 if (phba->fw_dump_cmpl)
1770 phba->fw_dump_cmpl = NULL;
1771 goto out;
1772 }
1773
1774 /* Firmware dump will trigger an HA_ERATT event, and
1775 * lpfc_handle_eratt_s4 routine already handles bringing the port back
1776 * online.
1777 */
1778 if (opcode == LPFC_FW_DUMP) {
1779 wait_for_completion(phba->fw_dump_cmpl);
1780 } else {
1781 init_completion(&online_compl);
1782 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1783 LPFC_EVT_ONLINE);
1784 if (!job_posted)
1785 goto out;
1786
1787 wait_for_completion(&online_compl);
1788 }
1789 out:
1790 /* in any case, restore the virtual functions enabled as before */
1791 if (sriov_nr_virtfn) {
1792 /* If fw_dump was performed, first disable to clean up */
1793 if (opcode == LPFC_FW_DUMP) {
1794 pci_disable_sriov(pdev);
1795 phba->cfg_sriov_nr_virtfn = 0;
1796 }
1797
1798 sriov_err =
1799 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1800 if (!sriov_err)
1801 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1802 }
1803
1804 /* return proper error code */
1805 if (!rc) {
1806 if (!job_posted)
1807 rc = -ENOMEM;
1808 else if (status)
1809 rc = -EIO;
1810 }
1811 return rc;
1812 }
1813
1814 /**
1815 * lpfc_nport_evt_cnt_show - Return the number of nport events
1816 * @dev: class device that is converted into a Scsi_host.
1817 * @attr: device attribute, not used.
1818 * @buf: on return contains the ascii number of nport events.
1819 *
1820 * Returns: size of formatted string.
1821 **/
1822 static ssize_t
1823 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1824 char *buf)
1825 {
1826 struct Scsi_Host *shost = class_to_shost(dev);
1827 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1828 struct lpfc_hba *phba = vport->phba;
1829
1830 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1831 }
1832
1833 static int
1834 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1835 {
1836 LPFC_MBOXQ_t *mbox = NULL;
1837 unsigned long val = 0;
1838 char *pval = NULL;
1839 int rc = 0;
1840
1841 if (!strncmp("enable", buff_out,
1842 strlen("enable"))) {
1843 pval = buff_out + strlen("enable") + 1;
1844 rc = kstrtoul(pval, 0, &val);
1845 if (rc)
1846 return rc; /* Invalid number */
1847 } else if (!strncmp("disable", buff_out,
1848 strlen("disable"))) {
1849 val = 0;
1850 } else {
1851 return -EINVAL; /* Invalid command */
1852 }
1853
1854 switch (val) {
1855 case 0:
1856 val = 0x0; /* Disable */
1857 break;
1858 case 2:
1859 val = 0x1; /* Enable two port trunk */
1860 break;
1861 case 4:
1862 val = 0x2; /* Enable four port trunk */
1863 break;
1864 default:
1865 return -EINVAL;
1866 }
1867
1868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1869 "0070 Set trunk mode with val %ld ", val);
1870
1871 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1872 if (!mbox)
1873 return -ENOMEM;
1874
1875 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1876 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1877 12, LPFC_SLI4_MBX_EMBED);
1878
1879 bf_set(lpfc_mbx_set_trunk_mode,
1880 &mbox->u.mqe.un.set_trunk_mode,
1881 val);
1882 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1883 if (rc)
1884 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1885 "0071 Set trunk mode failed with status: %d",
1886 rc);
1887 mempool_free(mbox, phba->mbox_mem_pool);
1888
1889 return 0;
1890 }
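
/*
 * Illustrative note (not part of the driver): lpfc_set_trunking() is fed the
 * text that follows the "trunk" keyword written to the board_mode sysfs
 * attribute, so accepted forms include, for example:
 *
 *	"trunk enable 2"	request a two port trunk
 *	"trunk enable 4"	request a four port trunk
 *	"trunk disable"		disable trunking
 */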
1891
1892 /**
1893 * lpfc_board_mode_show - Return the state of the board
1894 * @dev: class device that is converted into a Scsi_host.
1895 * @attr: device attribute, not used.
1896 * @buf: on return contains the state of the adapter.
1897 *
1898 * Returns: size of formatted string.
1899 **/
1900 static ssize_t
1901 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1902 char *buf)
1903 {
1904 struct Scsi_Host *shost = class_to_shost(dev);
1905 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1906 struct lpfc_hba *phba = vport->phba;
1907 char *state;
1908
1909 if (phba->link_state == LPFC_HBA_ERROR)
1910 state = "error";
1911 else if (phba->link_state == LPFC_WARM_START)
1912 state = "warm start";
1913 else if (phba->link_state == LPFC_INIT_START)
1914 state = "offline";
1915 else
1916 state = "online";
1917
1918 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1919 }
1920
1921 /**
1922 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1923 * @dev: class device that is converted into a Scsi_host.
1924 * @attr: device attribute, not used.
1925 * @buf: containing one of the strings "online", "offline", "warm" or "error".
1926 * @count: unused variable.
1927 *
1928 * Returns:
1929 * -EACCES if enable hba reset not enabled
1930 * -EINVAL if the buffer does not contain a valid string (see above)
1931 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1932 * buf length greater than zero indicates success
1933 **/
1934 static ssize_t
1935 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1936 const char *buf, size_t count)
1937 {
1938 struct Scsi_Host *shost = class_to_shost(dev);
1939 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1940 struct lpfc_hba *phba = vport->phba;
1941 struct completion online_compl;
1942 char *board_mode_str = NULL;
1943 int status = 0;
1944 int rc;
1945
1946 if (!phba->cfg_enable_hba_reset) {
1947 status = -EACCES;
1948 goto board_mode_out;
1949 }
1950
1951 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1952 "3050 lpfc_board_mode set to %s\n", buf);
1953
1954 init_completion(&online_compl);
1955
1956 if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1957 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1958 LPFC_EVT_ONLINE);
1959 if (rc == 0) {
1960 status = -ENOMEM;
1961 goto board_mode_out;
1962 }
1963 wait_for_completion(&online_compl);
1964 if (status)
1965 status = -EIO;
1966 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1967 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1968 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1969 if (phba->sli_rev == LPFC_SLI_REV4)
1970 status = -EINVAL;
1971 else
1972 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1973 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1974 if (phba->sli_rev == LPFC_SLI_REV4)
1975 status = -EINVAL;
1976 else
1977 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1978 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1979 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1980 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1981 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1982 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1983 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1984 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1985 == 0)
1986 status = lpfc_reset_pci_bus(phba);
1987 else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
1988 lpfc_issue_hb_tmo(phba);
1989 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1990 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1991 else
1992 status = -EINVAL;
1993
1994 board_mode_out:
1995 if (!status)
1996 return strlen(buf);
1997 else {
1998 board_mode_str = strchr(buf, '\n');
1999 if (board_mode_str)
2000 *board_mode_str = '\0';
2001 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2002 "3097 Failed \"%s\", status(%d), "
2003 "fc_flag(x%x)\n",
2004 buf, status, phba->pport->fc_flag);
2005 return status;
2006 }
2007 }
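
/*
 * Example (illustrative only, not part of the driver): from user space the
 * board state is changed by writing one of the keywords handled above to
 * the board_mode sysfs attribute.  The host number used below is an
 * assumption for illustration:
 *
 *	int fd = open("/sys/class/scsi_host/host0/board_mode", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "offline", strlen("offline"));
 *		close(fd);
 *	}
 */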
2008
2009 /**
2010 * lpfc_get_hba_info - Return various bits of information about the adapter
2011 * @phba: pointer to the adapter structure.
2012 * @mxri: max xri count.
2013 * @axri: available xri count.
2014 * @mrpi: max rpi count.
2015 * @arpi: available rpi count.
2016 * @mvpi: max vpi count.
2017 * @avpi: available vpi count.
2018 *
2019 * Description:
2020 * If an integer pointer for a count is not NULL, then the value for that
2021 * count is returned.
2022 *
2023 * Returns:
2024 * zero on error
2025 * one for success
2026 **/
2027 static int
2028 lpfc_get_hba_info(struct lpfc_hba *phba,
2029 uint32_t *mxri, uint32_t *axri,
2030 uint32_t *mrpi, uint32_t *arpi,
2031 uint32_t *mvpi, uint32_t *avpi)
2032 {
2033 struct lpfc_mbx_read_config *rd_config;
2034 LPFC_MBOXQ_t *pmboxq;
2035 MAILBOX_t *pmb;
2036 int rc = 0;
2037 uint32_t max_vpi;
2038
2039 /*
2040 * prevent udev from issuing mailbox commands until the port is
2041 * configured.
2042 */
2043 if (phba->link_state < LPFC_LINK_DOWN ||
2044 !phba->mbox_mem_pool ||
2045 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
2046 return 0;
2047
2048 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
2049 return 0;
2050
2051 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2052 if (!pmboxq)
2053 return 0;
2054 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
2055
2056 pmb = &pmboxq->u.mb;
2057 pmb->mbxCommand = MBX_READ_CONFIG;
2058 pmb->mbxOwner = OWN_HOST;
2059 pmboxq->ctx_buf = NULL;
2060
2061 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2062 rc = MBX_NOT_FINISHED;
2063 else
2064 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
2065
2066 if (rc != MBX_SUCCESS) {
2067 if (rc != MBX_TIMEOUT)
2068 mempool_free(pmboxq, phba->mbox_mem_pool);
2069 return 0;
2070 }
2071
2072 if (phba->sli_rev == LPFC_SLI_REV4) {
2073 rd_config = &pmboxq->u.mqe.un.rd_config;
2074 if (mrpi)
2075 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
2076 if (arpi)
2077 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
2078 phba->sli4_hba.max_cfg_param.rpi_used;
2079 if (mxri)
2080 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
2081 if (axri)
2082 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
2083 phba->sli4_hba.max_cfg_param.xri_used;
2084
2085 /* Account for differences with SLI-3. Get vpi count from
2086 * mailbox data and subtract one for max vpi value.
2087 */
2088 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
2089 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
2090
2091 /* Limit the max we support */
2092 if (max_vpi > LPFC_MAX_VPI)
2093 max_vpi = LPFC_MAX_VPI;
2094 if (mvpi)
2095 *mvpi = max_vpi;
2096 if (avpi)
2097 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
2098 } else {
2099 if (mrpi)
2100 *mrpi = pmb->un.varRdConfig.max_rpi;
2101 if (arpi)
2102 *arpi = pmb->un.varRdConfig.avail_rpi;
2103 if (mxri)
2104 *mxri = pmb->un.varRdConfig.max_xri;
2105 if (axri)
2106 *axri = pmb->un.varRdConfig.avail_xri;
2107 if (mvpi)
2108 *mvpi = pmb->un.varRdConfig.max_vpi;
2109 if (avpi) {
2110 /* avail_vpi is only valid if link is up and ready */
2111 if (phba->link_state == LPFC_HBA_READY)
2112 *avpi = pmb->un.varRdConfig.avail_vpi;
2113 else
2114 *avpi = pmb->un.varRdConfig.max_vpi;
2115 }
2116 }
2117
2118 mempool_free(pmboxq, phba->mbox_mem_pool);
2119 return 1;
2120 }
2121
2122 /**
2123 * lpfc_max_rpi_show - Return maximum rpi
2124 * @dev: class device that is converted into a Scsi_host.
2125 * @attr: device attribute, not used.
2126 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
2127 *
2128 * Description:
2129 * Calls lpfc_get_hba_info() asking for just the mrpi count.
2130 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2131 * to "Unknown" and the buffer length is returned, therefore the caller
2132 * must check for "Unknown" in the buffer to detect a failure.
2133 *
2134 * Returns: size of formatted string.
2135 **/
2136 static ssize_t
2137 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
2138 char *buf)
2139 {
2140 struct Scsi_Host *shost = class_to_shost(dev);
2141 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2142 struct lpfc_hba *phba = vport->phba;
2143 uint32_t cnt;
2144
2145 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
2146 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2147 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2148 }
2149
2150 /**
2151 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
2152 * @dev: class device that is converted into a Scsi_host.
2153 * @attr: device attribute, not used.
2154 * @buf: on return contains the used rpi count in decimal or "Unknown".
2155 *
2156 * Description:
2157 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
2158 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2159 * to "Unknown" and the buffer length is returned, therefore the caller
2160 * must check for "Unknown" in the buffer to detect a failure.
2161 *
2162 * Returns: size of formatted string.
2163 **/
2164 static ssize_t
2165 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
2166 char *buf)
2167 {
2168 struct Scsi_Host *shost = class_to_shost(dev);
2169 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2170 struct lpfc_hba *phba = vport->phba;
2171 uint32_t cnt, acnt;
2172
2173 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
2174 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2175 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2176 }
2177
2178 /**
2179 * lpfc_max_xri_show - Return maximum xri
2180 * @dev: class device that is converted into a Scsi_host.
2181 * @attr: device attribute, not used.
2182 * @buf: on return contains the maximum xri count in decimal or "Unknown".
2183 *
2184 * Description:
2185 * Calls lpfc_get_hba_info() asking for just the mxri count.
2186 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2187 * to "Unknown" and the buffer length is returned, therefore the caller
2188 * must check for "Unknown" in the buffer to detect a failure.
2189 *
2190 * Returns: size of formatted string.
2191 **/
2192 static ssize_t
2193 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
2194 char *buf)
2195 {
2196 struct Scsi_Host *shost = class_to_shost(dev);
2197 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2198 struct lpfc_hba *phba = vport->phba;
2199 uint32_t cnt;
2200
2201 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2202 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2203 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2204 }
2205
2206 /**
2207 * lpfc_used_xri_show - Return maximum xri minus the available xri
2208 * @dev: class device that is converted into a Scsi_host.
2209 * @attr: device attribute, not used.
2210 * @buf: on return contains the used xri count in decimal or "Unknown".
2211 *
2212 * Description:
2213 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2214 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2215 * to "Unknown" and the buffer length is returned, therefore the caller
2216 * must check for "Unknown" in the buffer to detect a failure.
2217 *
2218 * Returns: size of formatted string.
2219 **/
2220 static ssize_t
2221 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2222 char *buf)
2223 {
2224 struct Scsi_Host *shost = class_to_shost(dev);
2225 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2226 struct lpfc_hba *phba = vport->phba;
2227 uint32_t cnt, acnt;
2228
2229 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2230 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2231 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2232 }
2233
2234 /**
2235 * lpfc_max_vpi_show - Return maximum vpi
2236 * @dev: class device that is converted into a Scsi_host.
2237 * @attr: device attribute, not used.
2238 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2239 *
2240 * Description:
2241 * Calls lpfc_get_hba_info() asking for just the mvpi count.
2242 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2243 * to "Unknown" and the buffer length is returned, therefore the caller
2244 * must check for "Unknown" in the buffer to detect a failure.
2245 *
2246 * Returns: size of formatted string.
2247 **/
2248 static ssize_t
2249 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2250 char *buf)
2251 {
2252 struct Scsi_Host *shost = class_to_shost(dev);
2253 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2254 struct lpfc_hba *phba = vport->phba;
2255 uint32_t cnt;
2256
2257 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2258 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2259 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2260 }
2261
2262 /**
2263 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2264 * @dev: class device that is converted into a Scsi_host.
2265 * @attr: device attribute, not used.
2266 * @buf: on return contains the used vpi count in decimal or "Unknown".
2267 *
2268 * Description:
2269 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2270 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2271 * to "Unknown" and the buffer length is returned, therefore the caller
2272 * must check for "Unknown" in the buffer to detect a failure.
2273 *
2274 * Returns: size of formatted string.
2275 **/
2276 static ssize_t
2277 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2278 char *buf)
2279 {
2280 struct Scsi_Host *shost = class_to_shost(dev);
2281 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2282 struct lpfc_hba *phba = vport->phba;
2283 uint32_t cnt, acnt;
2284
2285 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2286 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2287 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2288 }
2289
2290 /**
2291 * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2292 * @dev: class device that is converted into a Scsi_host.
2293 * @attr: device attribute, not used.
2294 * @buf: text that must be interpreted to determine if npiv is supported.
2295 *
2296 * Description:
2297 * Buffer will contain text indicating NPIV is not supported on the port,
2298 * the port is an NPIV physical port, or it is an NPIV virtual port with
2299 * the id of the vport.
2300 *
2301 * Returns: size of formatted string.
2302 **/
2303 static ssize_t
2304 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2305 char *buf)
2306 {
2307 struct Scsi_Host *shost = class_to_shost(dev);
2308 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2309 struct lpfc_hba *phba = vport->phba;
2310
2311 if (!(phba->max_vpi))
2312 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2313 if (vport->port_type == LPFC_PHYSICAL_PORT)
2314 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2315 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2316 }
2317
2318 /**
2319 * lpfc_poll_show - Return text about poll support for the adapter
2320 * @dev: class device that is converted into a Scsi_host.
2321 * @attr: device attribute, not used.
2322 * @buf: on return contains the cfg_poll in hex.
2323 *
2324 * Notes:
2325 * cfg_poll should be a lpfc_polling_flags type.
2326 *
2327 * Returns: size of formatted string.
2328 **/
2329 static ssize_t
2330 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2331 char *buf)
2332 {
2333 struct Scsi_Host *shost = class_to_shost(dev);
2334 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2335 struct lpfc_hba *phba = vport->phba;
2336
2337 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2338 }
2339
2340 /**
2341 * lpfc_poll_store - Set the value of cfg_poll for the adapter
2342 * @dev: class device that is converted into a Scsi_host.
2343 * @attr: device attribute, not used.
2344 * @buf: one or more lpfc_polling_flags values.
2345 * @count: not used.
2346 *
2347 * Notes:
2348 * buf contents converted to integer and checked for a valid value.
2349 *
2350 * Returns:
2351 * -EINVAL if the buffer cannot be converted or is out of range
2352 * length of the buf on success
2353 **/
2354 static ssize_t
2355 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2356 const char *buf, size_t count)
2357 {
2358 struct Scsi_Host *shost = class_to_shost(dev);
2359 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2360 struct lpfc_hba *phba = vport->phba;
2361 uint32_t creg_val;
2362 uint32_t old_val;
2363 int val = 0;
2364
2365 if (!isdigit(buf[0]))
2366 return -EINVAL;
2367
2368 if (sscanf(buf, "%i", &val) != 1)
2369 return -EINVAL;
2370
2371 if ((val & 0x3) != val)
2372 return -EINVAL;
2373
2374 if (phba->sli_rev == LPFC_SLI_REV4)
2375 val = 0;
2376
2377 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2378 "3051 lpfc_poll changed from %d to %d\n",
2379 phba->cfg_poll, val);
2380
2381 spin_lock_irq(&phba->hbalock);
2382
2383 old_val = phba->cfg_poll;
2384
2385 if (val & ENABLE_FCP_RING_POLLING) {
2386 if ((val & DISABLE_FCP_RING_INT) &&
2387 !(old_val & DISABLE_FCP_RING_INT)) {
2388 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2389 spin_unlock_irq(&phba->hbalock);
2390 return -EINVAL;
2391 }
2392 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2393 writel(creg_val, phba->HCregaddr);
2394 readl(phba->HCregaddr); /* flush */
2395
2396 lpfc_poll_start_timer(phba);
2397 }
2398 } else if (val != 0x0) {
2399 spin_unlock_irq(&phba->hbalock);
2400 return -EINVAL;
2401 }
2402
2403 if (!(val & DISABLE_FCP_RING_INT) &&
2404 (old_val & DISABLE_FCP_RING_INT))
2405 {
2406 spin_unlock_irq(&phba->hbalock);
2407 del_timer(&phba->fcp_poll_timer);
2408 spin_lock_irq(&phba->hbalock);
2409 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2410 spin_unlock_irq(&phba->hbalock);
2411 return -EINVAL;
2412 }
2413 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2414 writel(creg_val, phba->HCregaddr);
2415 readl(phba->HCregaddr); /* flush */
2416 }
2417
2418 phba->cfg_poll = val;
2419
2420 spin_unlock_irq(&phba->hbalock);
2421
2422 return strlen(buf);
2423 }
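
/*
 * Illustrative summary of the values accepted above, assuming
 * ENABLE_FCP_RING_POLLING is bit 0 and DISABLE_FCP_RING_INT is bit 1 of
 * the polling flags:
 *
 *	0	polling disabled, FCP ring interrupt enabled
 *	1	poll the FCP ring, interrupt left enabled
 *	3	poll the FCP ring with the ring interrupt disabled
 *
 * Any other value is rejected with -EINVAL; on SLI-4 ports the value is
 * forced to 0.
 */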
2424
2425 /**
2426 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2427 * @dev: class converted to a Scsi_host structure.
2428 * @attr: device attribute, not used.
2429 * @buf: on return contains the maximum number of virtual functions supported.
2430 *
2431 * Description:
2432 * Returns the maximum number of virtual functions a physical function can
2433 * support; 0 will be returned if called on a virtual function.
2434 *
2435 * Returns: size of formatted string.
2436 **/
2437 static ssize_t
2438 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2439 struct device_attribute *attr,
2440 char *buf)
2441 {
2442 struct Scsi_Host *shost = class_to_shost(dev);
2443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2444 struct lpfc_hba *phba = vport->phba;
2445 uint16_t max_nr_virtfn;
2446
2447 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2448 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2449 }
2450
2451 /**
2452 * lpfc_enable_bbcr_set - Set the lpfc_enable_bbcr attribute value.
2453 * @phba: pointer to the adapter structure.
2454 * @val: integer attribute value.
2455 *
2456 * Description:
2457 * Validates the min and max values, then sets the
2458 * adapter config field if in the valid range. Prints an error message
2459 * and does not set the parameter if invalid.
2460 *
2461 * Returns:
2462 * zero on success
2463 * -EINVAL if val is invalid
2464 */
2465 static ssize_t
2466 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2467 {
2468 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2469 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2470 "3068 lpfc_enable_bbcr changed from %d to "
2471 "%d\n", phba->cfg_enable_bbcr, val);
2472 phba->cfg_enable_bbcr = val;
2473 return 0;
2474 }
2475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2476 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
2477 "1\n", val);
2478 return -EINVAL;
2479 }
2480
2481 /*
2482 * lpfc_param_show - Return a cfg attribute value in decimal
2483 *
2484 * Description:
2485 * Macro that given an attr e.g. hba_queue_depth expands
2486 * into a function with the name lpfc_hba_queue_depth_show.
2487 *
2488 * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
2489 * @dev: class device that is converted into a Scsi_host.
2490 * @attr: device attribute, not used.
2491 * @buf: on return contains the attribute value in decimal.
2492 *
2493 * Returns: size of formatted string.
2494 **/
2495 #define lpfc_param_show(attr) \
2496 static ssize_t \
2497 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2498 char *buf) \
2499 { \
2500 struct Scsi_Host *shost = class_to_shost(dev);\
2501 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2502 struct lpfc_hba *phba = vport->phba;\
2503 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2504 phba->cfg_##attr);\
2505 }
2506
2507 /*
2508 * lpfc_param_hex_show - Return a cfg attribute value in hex
2509 *
2510 * Description:
2511 * Macro that given an attr e.g. hba_queue_depth expands
2512 * into a function with the name lpfc_hba_queue_depth_show
2513 *
2514 * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
2515 * @dev: class device that is converted into a Scsi_host.
2516 * @attr: device attribute, not used.
2517 * @buf: on return contains the attribute value in hexadecimal.
2518 *
2519 * Returns: size of formatted string.
2520 **/
2521 #define lpfc_param_hex_show(attr) \
2522 static ssize_t \
2523 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2524 char *buf) \
2525 { \
2526 struct Scsi_Host *shost = class_to_shost(dev);\
2527 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2528 struct lpfc_hba *phba = vport->phba;\
2529 uint val = 0;\
2530 val = phba->cfg_##attr;\
2531 return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2532 phba->cfg_##attr);\
2533 }
2534
2535 /*
2536 * lpfc_param_init - Initializes a cfg attribute
2537 *
2538 * Description:
2539 * Macro that given an attr e.g. hba_queue_depth expands
2540 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2541 * takes a default argument, a minimum and maximum argument.
2542 *
2543 * lpfc_##attr##_init: Initializes an attribute.
2544 * @phba: pointer to the adapter structure.
2545 * @val: integer attribute value.
2546 *
2547 * Validates the min and max values then sets the adapter config field
2548 * accordingly, or uses the default if out of range and prints an error message.
2549 *
2550 * Returns:
2551 * zero on success
2552 * -EINVAL if default used
2553 **/
2554 #define lpfc_param_init(attr, default, minval, maxval) \
2555 static int \
2556 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2557 { \
2558 if (lpfc_rangecheck(val, minval, maxval)) {\
2559 phba->cfg_##attr = val;\
2560 return 0;\
2561 }\
2562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2563 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2564 "allowed range is ["#minval", "#maxval"]\n", val); \
2565 phba->cfg_##attr = default;\
2566 return -EINVAL;\
2567 }
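
/*
 * For illustration only: with a hypothetical attribute "foo",
 * lpfc_param_init(foo, 1, 0, 4) expands to a function equivalent to:
 *
 *	static int
 *	lpfc_foo_init(struct lpfc_hba *phba, uint val)
 *	{
 *		if (lpfc_rangecheck(val, 0, 4)) {
 *			phba->cfg_foo = val;
 *			return 0;
 *		}
 *		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 *				"0449 lpfc_foo attribute cannot be set to %d, "
 *				"allowed range is [0, 4]\n", val);
 *		phba->cfg_foo = 1;
 *		return -EINVAL;
 *	}
 */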
2568
2569 /*
2570 * lpfc_param_set - Set a cfg attribute value
2571 *
2572 * Description:
2573 * Macro that given an attr e.g. hba_queue_depth expands
2574 * into a function with the name lpfc_hba_queue_depth_set
2575 *
2576 * lpfc_##attr##_set: Sets an attribute value.
2577 * @phba: pointer to the adapter structure.
2578 * @val: integer attribute value.
2579 *
2580 * Description:
2581 * Validates the min and max values, then sets the
2582 * adapter config field if in the valid range. Prints an error message
2583 * and does not set the parameter if invalid.
2584 *
2585 * Returns:
2586 * zero on success
2587 * -EINVAL if val is invalid
2588 **/
2589 #define lpfc_param_set(attr, default, minval, maxval) \
2590 static int \
2591 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2592 { \
2593 if (lpfc_rangecheck(val, minval, maxval)) {\
2594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2595 "3052 lpfc_" #attr " changed from %d to %d\n", \
2596 phba->cfg_##attr, val); \
2597 phba->cfg_##attr = val;\
2598 return 0;\
2599 }\
2600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2601 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2602 "allowed range is ["#minval", "#maxval"]\n", val); \
2603 return -EINVAL;\
2604 }
2605
2606 /*
2607 * lpfc_param_store - Set a vport attribute value
2608 *
2609 * Description:
2610 * Macro that given an attr e.g. hba_queue_depth expands
2611 * into a function with the name lpfc_hba_queue_depth_store.
2612 *
2613 * lpfc_##attr##_store: Set an attribute value.
2614 * @dev: class device that is converted into a Scsi_host.
2615 * @attr: device attribute, not used.
2616 * @buf: contains the attribute value in ascii.
2617 * @count: not used.
2618 *
2619 * Description:
2620 * Convert the ascii text number to an integer, then
2621 * use the lpfc_##attr##_set function to set the value.
2622 *
2623 * Returns:
2624 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2625 * length of buffer upon success.
2626 **/
2627 #define lpfc_param_store(attr) \
2628 static ssize_t \
2629 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2630 const char *buf, size_t count) \
2631 { \
2632 struct Scsi_Host *shost = class_to_shost(dev);\
2633 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2634 struct lpfc_hba *phba = vport->phba;\
2635 uint val = 0;\
2636 if (!isdigit(buf[0]))\
2637 return -EINVAL;\
2638 if (sscanf(buf, "%i", &val) != 1)\
2639 return -EINVAL;\
2640 if (lpfc_##attr##_set(phba, val) == 0) \
2641 return strlen(buf);\
2642 else \
2643 return -EINVAL;\
2644 }
2645
2646 /*
2647 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2648 *
2649 * Description:
2650 * Macro that given an attr e.g. hba_queue_depth expands
2651 * into a function with the name lpfc_hba_queue_depth_show
2652 *
2653 * lpfc_##attr##_show: prints the attribute value in decimal.
2654 * @dev: class device that is converted into a Scsi_host.
2655 * @attr: device attribute, not used.
2656 * @buf: on return contains the attribute value in decimal.
2657 *
2658 * Returns: length of formatted string.
2659 **/
2660 #define lpfc_vport_param_show(attr) \
2661 static ssize_t \
2662 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2663 char *buf) \
2664 { \
2665 struct Scsi_Host *shost = class_to_shost(dev);\
2666 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2667 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2668 }
2669
2670 /*
2671 * lpfc_vport_param_hex_show - Return hex formatted attribute value
2672 *
2673 * Description:
2674 * Macro that given an attr e.g.
2675 * hba_queue_depth expands into a function with the name
2676 * lpfc_hba_queue_depth_show
2677 *
2678 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2679 * @dev: class device that is converted into a Scsi_host.
2680 * @attr: device attribute, not used.
2681 * @buf: on return contains the attribute value in hexadecimal.
2682 *
2683 * Returns: length of formatted string.
2684 **/
2685 #define lpfc_vport_param_hex_show(attr) \
2686 static ssize_t \
2687 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2688 char *buf) \
2689 { \
2690 struct Scsi_Host *shost = class_to_shost(dev);\
2691 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2692 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2693 }
2694
2695 /*
2696 * lpfc_vport_param_init - Initialize a vport cfg attribute
2697 *
2698 * Description:
2699 * Macro that given an attr e.g. hba_queue_depth expands
2700 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2701 * takes a default argument, a minimum and maximum argument.
2702 *
2703 * lpfc_##attr##_init: validates the min and max values then sets the
2704 * adapter config field accordingly, or uses the default if out of range
2705 * and prints an error message.
2706 * @phba: pointer to the adapter structure.
2707 * @val: integer attribute value.
2708 *
2709 * Returns:
2710 * zero on success
2711 * -EINVAL if default used
2712 **/
2713 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2714 static int \
2715 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2716 { \
2717 if (lpfc_rangecheck(val, minval, maxval)) {\
2718 vport->cfg_##attr = val;\
2719 return 0;\
2720 }\
2721 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2722 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2723 "allowed range is ["#minval", "#maxval"]\n", val); \
2724 vport->cfg_##attr = default;\
2725 return -EINVAL;\
2726 }
2727
2728 /*
2729 * lpfc_vport_param_set - Set a vport cfg attribute
2730 *
2731 * Description:
2732 * Macro that given an attr e.g. hba_queue_depth expands
2733 * into a function with the name lpfc_hba_queue_depth_set
2734 *
2735 * lpfc_##attr##_set: validates the min and max values then sets the
2736 * adapter config field if in the valid range. Prints an error message
2737 * and does not set the parameter if invalid.
2738 * @phba: pointer to the adapter structure.
2739 * @val: integer attribute value.
2740 *
2741 * Returns:
2742 * zero on success
2743 * -EINVAL if val is invalid
2744 **/
2745 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2746 static int \
2747 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2748 { \
2749 if (lpfc_rangecheck(val, minval, maxval)) {\
2750 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2751 "3053 lpfc_" #attr \
2752 " changed from %d (x%x) to %d (x%x)\n", \
2753 vport->cfg_##attr, vport->cfg_##attr, \
2754 val, val); \
2755 vport->cfg_##attr = val;\
2756 return 0;\
2757 }\
2758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2759 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2760 "allowed range is ["#minval", "#maxval"]\n", val); \
2761 return -EINVAL;\
2762 }
2763
2764 /*
2765 * lpfc_vport_param_store - Set a vport attribute
2766 *
2767 * Description:
2768 * Macro that given an attr e.g. hba_queue_depth
2769 * expands into a function with the name lpfc_hba_queue_depth_store
2770 *
2771 * lpfc_##attr##_store: convert the ascii text number to an integer, then
2772 * use the lpfc_##attr##_set function to set the value.
2773 * @dev: class device that is converted into a Scsi_host.
2774 * @buf: contains the attribute value in decimal.
2775 * @count: not used.
2776 *
2777 * Returns:
2778 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2779 * length of buffer upon success.
2780 **/
2781 #define lpfc_vport_param_store(attr) \
2782 static ssize_t \
2783 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2784 const char *buf, size_t count) \
2785 { \
2786 struct Scsi_Host *shost = class_to_shost(dev);\
2787 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2788 uint val = 0;\
2789 if (!isdigit(buf[0]))\
2790 return -EINVAL;\
2791 if (sscanf(buf, "%i", &val) != 1)\
2792 return -EINVAL;\
2793 if (lpfc_##attr##_set(vport, val) == 0) \
2794 return strlen(buf);\
2795 else \
2796 return -EINVAL;\
2797 }
2798
2799
2800 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2801 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2802 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2803 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2804 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2805 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2806 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2807 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2808 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2809 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2810 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2811 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2812 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2813 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2814 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2815 lpfc_link_state_store);
2816 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2817 lpfc_option_rom_version_show, NULL);
2818 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2819 lpfc_num_discovered_ports_show, NULL);
2820 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2821 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2822 static DEVICE_ATTR_RO(lpfc_drvr_version);
2823 static DEVICE_ATTR_RO(lpfc_enable_fip);
2824 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2825 lpfc_board_mode_show, lpfc_board_mode_store);
2826 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2827 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2828 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2829 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2830 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2831 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2832 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2833 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2834 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2835 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2836 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2837 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2838 NULL);
2839 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
2840
2841 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2842 #define WWN_SZ 8
2843 /**
2844 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2845 * @buf: WWN string.
2846 * @cnt: Length of string.
2847 * @wwn: Array to receive converted wwn value.
2848 *
2849 * Returns:
2850 * -EINVAL if the buffer does not contain a valid wwn
2851 * 0 success
2852 **/
2853 static size_t
2854 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2855 {
2856 unsigned int i, j;
2857
2858 /* Count may include a LF at end of string */
2859 if (buf[cnt-1] == '\n')
2860 cnt--;
2861
2862 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2863 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2864 return -EINVAL;
2865
2866 memset(wwn, 0, WWN_SZ);
2867
2868 /* Validate and store the new name */
2869 for (i = 0, j = 0; i < 16; i++) {
2870 if ((*buf >= 'a') && (*buf <= 'f'))
2871 j = ((j << 4) | ((*buf++ - 'a') + 10));
2872 else if ((*buf >= 'A') && (*buf <= 'F'))
2873 j = ((j << 4) | ((*buf++ - 'A') + 10));
2874 else if ((*buf >= '0') && (*buf <= '9'))
2875 j = ((j << 4) | (*buf++ - '0'));
2876 else
2877 return -EINVAL;
2878 if (i % 2) {
2879 wwn[i/2] = j & 0xff;
2880 j = 0;
2881 }
2882 }
2883 return 0;
2884 }
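
/*
 * Accepted input forms for lpfc_wwn_set(), shown for illustration:
 *
 *	"10000000c9abcdef"	16 hex digits
 *	"x10000000c9abcdef"	17 characters, leading 'x'
 *	"0x10000000c9abcdef"	18 characters, leading "0x"
 *
 * A trailing newline from a sysfs write is tolerated; anything else
 * returns -EINVAL.
 */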
2885 /**
2886 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2887 * @dev: class device that is converted into a Scsi_host.
2888 * @attr: device attribute, not used.
2889 * @buf: containing the string lpfc_soft_wwn_key.
2890 * @count: must be size of lpfc_soft_wwn_key.
2891 *
2892 * Returns:
2893 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2894 * value of count on success
2895 **/
2896 static ssize_t
2897 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2898 const char *buf, size_t count)
2899 {
2900 struct Scsi_Host *shost = class_to_shost(dev);
2901 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2902 struct lpfc_hba *phba = vport->phba;
2903 unsigned int cnt = count;
2904 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2905 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2906
2907 /*
2908 * We're doing a simple sanity check for soft_wwpn setting.
2909 * We require that the user write a specific key to enable
2910 * the soft_wwpn attribute to be settable. Once the attribute
2911 * is written, the enable key resets. If further updates are
2912 * desired, the key must be written again to re-enable the
2913 * attribute.
2914 *
2915 * The "key" is not secret - it is a hardcoded string shown
2916 * here. The intent is to protect against the random user or
2917 * application that is just writing attributes.
2918 */
2919 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2920 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2921 "0051 lpfc soft wwpn can not be enabled: "
2922 "fawwpn is enabled\n");
2923 return -EINVAL;
2924 }
2925
2926 /* count may include a LF at end of string */
2927 if (buf[cnt-1] == '\n')
2928 cnt--;
2929
2930 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2931 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2932 return -EINVAL;
2933
2934 phba->soft_wwn_enable = 1;
2935
2936 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2937 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2938 phba->brd_no);
2939 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2940 " The soft_wwpn feature is not supported by Broadcom.");
2941
2942 return count;
2943 }
2944 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
2945
2946 /**
2947 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2948 * @dev: class device that is converted into a Scsi_host.
2949 * @attr: device attribute, not used.
2950 * @buf: on return contains the wwpn in hexadecimal.
2951 *
2952 * Returns: size of formatted string.
2953 **/
2954 static ssize_t
2955 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2956 char *buf)
2957 {
2958 struct Scsi_Host *shost = class_to_shost(dev);
2959 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2960 struct lpfc_hba *phba = vport->phba;
2961
2962 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2963 (unsigned long long)phba->cfg_soft_wwpn);
2964 }
2965
2966 /**
2967 * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2968 * @dev: class device that is converted into a Scsi_host.
2969 * @attr: device attribute, not used.
2970 * @buf: contains the wwpn in hexadecimal.
2971 * @count: number of wwpn bytes in buf
2972 *
2973 * Returns:
2974 * -EACCES hba reset not enabled, adapter over temp
2975 * -EINVAL soft wwn not enabled, count is invalid, or buf contains an invalid wwpn byte
2976 * -EIO error taking adapter offline or online
2977 * value of count on success
2978 **/
2979 static ssize_t
2980 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2981 const char *buf, size_t count)
2982 {
2983 struct Scsi_Host *shost = class_to_shost(dev);
2984 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2985 struct lpfc_hba *phba = vport->phba;
2986 struct completion online_compl;
2987 int stat1 = 0, stat2 = 0;
2988 unsigned int cnt = count;
2989 u8 wwpn[WWN_SZ];
2990 int rc;
2991
2992 if (!phba->cfg_enable_hba_reset)
2993 return -EACCES;
2994 spin_lock_irq(&phba->hbalock);
2995 if (phba->over_temp_state == HBA_OVER_TEMP) {
2996 spin_unlock_irq(&phba->hbalock);
2997 return -EACCES;
2998 }
2999 spin_unlock_irq(&phba->hbalock);
3000 /* count may include a LF at end of string */
3001 if (buf[cnt-1] == '\n')
3002 cnt--;
3003
3004 if (!phba->soft_wwn_enable)
3005 return -EINVAL;
3006
3007 /* lock setting wwpn, wwnn down */
3008 phba->soft_wwn_enable = 0;
3009
3010 rc = lpfc_wwn_set(buf, cnt, wwpn);
3011 if (rc) {
3012 /* not able to set wwpn, unlock it */
3013 phba->soft_wwn_enable = 1;
3014 return rc;
3015 }
3016
3017 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
3018 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
3019 if (phba->cfg_soft_wwnn)
3020 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
3021
3022 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
3023 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
3024
3025 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
3026 if (stat1)
3027 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3028 "0463 lpfc_soft_wwpn attribute set failed to "
3029 "reinit adapter - %d\n", stat1);
3030 init_completion(&online_compl);
3031 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
3032 LPFC_EVT_ONLINE);
3033 if (rc == 0)
3034 return -ENOMEM;
3035
3036 wait_for_completion(&online_compl);
3037 if (stat2)
3038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3039 "0464 lpfc_soft_wwpn attribute set failed to "
3040 "reinit adapter - %d\n", stat2);
3041 return (stat1 || stat2) ? -EIO : count;
3042 }
3043 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
3044
3045 /**
3046 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
3047 * @dev: class device that is converted into a Scsi_host.
3048 * @attr: device attribute, not used.
3049 * @buf: on return contains the wwnn in hexadecimal.
3050 *
3051 * Returns: size of formatted string.
3052 **/
3053 static ssize_t
3054 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
3055 char *buf)
3056 {
3057 struct Scsi_Host *shost = class_to_shost(dev);
3058 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3059 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3060 (unsigned long long)phba->cfg_soft_wwnn);
3061 }
3062
3063 /**
3064 * lpfc_soft_wwnn_store - sets the ww node name of the adapter
3065 * @dev: class device that is converted into a Scsi_host.
3066 * @attr: device attribute, not used.
3067 * @buf: contains the ww node name in hexadecimal.
3068 * @count: number of wwnn bytes in buf.
3069 *
3070 * Returns:
3071 * -EINVAL soft wwn not enabled, count is invalid, or buf contains an invalid wwnn byte
3072 * value of count on success
3073 **/
3074 static ssize_t
3075 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
3076 const char *buf, size_t count)
3077 {
3078 struct Scsi_Host *shost = class_to_shost(dev);
3079 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3080 unsigned int cnt = count;
3081 u8 wwnn[WWN_SZ];
3082 int rc;
3083
3084 /* count may include a LF at end of string */
3085 if (buf[cnt-1] == '\n')
3086 cnt--;
3087
3088 if (!phba->soft_wwn_enable)
3089 return -EINVAL;
3090
3091 rc = lpfc_wwn_set(buf, cnt, wwnn);
3092 if (rc) {
3093 /* Allow wwnn to be set many times, as long as the enable
3094 * is set. However, once the wwpn is set, everything locks.
3095 */
3096 return rc;
3097 }
3098
3099 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
3100
3101 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
3102 "lpfc%d: soft_wwnn set. Value will take effect upon "
3103 "setting of the soft_wwpn\n", phba->brd_no);
3104
3105 return count;
3106 }
3107 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
3108
3109 /**
3110 * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
3111 * Optimized Access Storage (OAS) operations.
3112 * @dev: class device that is converted into a Scsi_host.
3113 * @attr: device attribute, not used.
3114 * @buf: buffer for passing information.
3115 *
3116 * Returns:
3117 * size of formatted string.
3118 **/
3119 static ssize_t
3120 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
3121 char *buf)
3122 {
3123 struct Scsi_Host *shost = class_to_shost(dev);
3124 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3125
3126 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3127 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
3128 }
3129
3130 /**
3131 * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
3132 * Optimized Access Storage (OAS) operations.
3133 * @dev: class device that is converted into a Scsi_host.
3134 * @attr: device attribute, not used.
3135 * @buf: buffer for passing information.
3136 * @count: Size of the data buffer.
3137 *
3138 * Returns:
3139 * -EINVAL count is invalid or buf contains an invalid wwpn byte
3140 * -EPERM oas is not supported by hba
3141 * value of count on success
3142 **/
3143 static ssize_t
3144 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
3145 const char *buf, size_t count)
3146 {
3147 struct Scsi_Host *shost = class_to_shost(dev);
3148 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3149 unsigned int cnt = count;
3150 uint8_t wwpn[WWN_SZ];
3151 int rc;
3152
3153 if (!phba->cfg_fof)
3154 return -EPERM;
3155
3156 /* count may include a LF at end of string */
3157 if (buf[cnt-1] == '\n')
3158 cnt--;
3159
3160 rc = lpfc_wwn_set(buf, cnt, wwpn);
3161 if (rc)
3162 return rc;
3163
3164 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3165 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3166 if (wwn_to_u64(wwpn) == 0)
3167 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
3168 else
3169 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
3170 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3171 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3172 return count;
3173 }
3174 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
3175 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
3176
3177 /**
3178 * lpfc_oas_priority_show - Return the priority used for
3179 * Optimized Access Storage (OAS) operations.
3180 * @dev: class device that is converted into a Scsi_host.
3181 * @attr: device attribute, not used.
3182 * @buf: buffer for passing information.
3183 *
3184 * Returns:
3185 * size of formatted string.
3186 **/
3187 static ssize_t
3188 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
3189 char *buf)
3190 {
3191 struct Scsi_Host *shost = class_to_shost(dev);
3192 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3193
3194 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3195 }
3196
3197 /**
3198 * lpfc_oas_priority_store - Store the priority used for
3199 * Optimized Access Storage (OAS) operations.
3200 * @dev: class device that is converted into a Scsi_host.
3201 * @attr: device attribute, not used.
3202 * @buf: buffer for passing information.
3203 * @count: Size of the data buffer.
3204 *
3205 * Returns:
3206 * -EINVAL buf does not contain a valid priority value
3207 * -EPERM oas is not supported by hba
3208 * value of count on success
3209 **/
3210 static ssize_t
3211 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3212 const char *buf, size_t count)
3213 {
3214 struct Scsi_Host *shost = class_to_shost(dev);
3215 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3216 unsigned int cnt = count;
3217 unsigned long val;
3218 int ret;
3219
3220 if (!phba->cfg_fof)
3221 return -EPERM;
3222
3223 /* count may include a LF at end of string */
3224 if (buf[cnt-1] == '\n')
3225 cnt--;
3226
3227 ret = kstrtoul(buf, 0, &val);
3228 if (ret || (val > 0x7f))
3229 return -EINVAL;
3230
3231 if (val)
3232 phba->cfg_oas_priority = (uint8_t)val;
3233 else
3234 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3235 return count;
3236 }
3237 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3238 lpfc_oas_priority_show, lpfc_oas_priority_store);
3239
3240 /**
3241 * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3242 * for Optimized Access Storage (OAS) operations.
3243 * @dev: class device that is converted into a Scsi_host.
3244 * @attr: device attribute, not used.
3245 * @buf: buffer for passing information.
3246 *
3247 * Returns:
3248 * size of formatted string.
3249 **/
3250 static ssize_t
3251 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3252 char *buf)
3253 {
3254 struct Scsi_Host *shost = class_to_shost(dev);
3255 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3256
3257 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3258 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3259 }
3260
3261 /**
3262 * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3263 * for Optimized Access Storage (OAS) operations.
3264 * @dev: class device that is converted into a Scsi_host.
3265 * @attr: device attribute, not used.
3266 * @buf: buffer for passing information.
3267 * @count: Size of the data buffer.
3268 *
3269 * Returns:
3270 * -EINVAL count is invalid or buf contains an invalid wwpn byte
3271 * -EPERM oas is not supported by hba
3272 * value of count on success
3273 **/
3274 static ssize_t
3275 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3276 const char *buf, size_t count)
3277 {
3278 struct Scsi_Host *shost = class_to_shost(dev);
3279 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3280 unsigned int cnt = count;
3281 uint8_t wwpn[WWN_SZ];
3282 int rc;
3283
3284 if (!phba->cfg_fof)
3285 return -EPERM;
3286
3287 /* count may include a LF at end of string */
3288 if (buf[cnt-1] == '\n')
3289 cnt--;
3290
3291 rc = lpfc_wwn_set(buf, cnt, wwpn);
3292 if (rc)
3293 return rc;
3294
3295 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3296 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3297 if (wwn_to_u64(wwpn) == 0)
3298 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3299 else
3300 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3301 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3302 if (phba->cfg_oas_priority == 0)
3303 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3304 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3305 return count;
3306 }
3307 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3308 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3309
3310 /**
3311 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3312 * that will be applied to luns for
3313 * Optimized Access Storage (OAS) operations.
3314 * @dev: class device that is converted into a Scsi_host.
3315 * @attr: device attribute, not used.
3316 * @buf: buffer for passing information.
3317 *
3318 * Returns:
3319 * size of formatted string.
3320 **/
3321 static ssize_t
3322 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3323 char *buf)
3324 {
3325 struct Scsi_Host *shost = class_to_shost(dev);
3326 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3327
3328 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3329 }
3330
3331 /**
3332  * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3333  * to be applied to luns for Optimized Access Storage (OAS)
3334  * operations.
3335 * @dev: class device that is converted into a Scsi_host.
3336 * @attr: device attribute, not used.
3337 * @buf: buffer for passing information.
3338 * @count: Size of the data buffer.
3339 *
3340 * Returns:
3341  * -EINVAL the value written is not 0 or 1
3342 * -EPERM oas is not supported by hba
3343 * value of count on success
3344 **/
3345 static ssize_t
3346 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3347 const char *buf, size_t count)
3348 {
3349 struct Scsi_Host *shost = class_to_shost(dev);
3350 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3351 int val = 0;
3352
3353 if (!phba->cfg_fof)
3354 return -EPERM;
3355
3356 if (!isdigit(buf[0]))
3357 return -EINVAL;
3358
3359 if (sscanf(buf, "%i", &val) != 1)
3360 return -EINVAL;
3361
3362 if ((val != 0) && (val != 1))
3363 return -EINVAL;
3364
3365 phba->cfg_oas_lun_state = val;
3366 return strlen(buf);
3367 }
3368 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3369 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
3370
3371 /**
3372 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3373 * Storage (OAS) lun returned by the
3374 * lpfc_oas_lun_show function.
3375 * @dev: class device that is converted into a Scsi_host.
3376 * @attr: device attribute, not used.
3377 * @buf: buffer for passing information.
3378 *
3379 * Returns:
3380 * size of formatted string.
3381 **/
3382 static ssize_t
3383 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3384 char *buf)
3385 {
3386 struct Scsi_Host *shost = class_to_shost(dev);
3387 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3388
3389 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3390 return -EFAULT;
3391
3392 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3393 }
3394 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3395 lpfc_oas_lun_status_show, NULL);
3396
3397
3398 /**
3399 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3400 * (OAS) operations.
3401 * @phba: lpfc_hba pointer.
3402 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3403 * @tgt_wwpn: wwpn of the target associated with the returned lun
3404 * @lun: the fc lun for setting oas state.
3405 * @oas_state: the oas state to be set to the lun.
3406 * @pri: priority
3407 *
3408 * Returns:
3409  * SUCCESS: 0
3410  * -EPERM OAS is not enabled or not supported by this port.
3411  * -ENOMEM failed to enable the lun for OAS operations.
3412 */
3413 static size_t
3414 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3415 uint8_t tgt_wwpn[], uint64_t lun,
3416 uint32_t oas_state, uint8_t pri)
3417 {
3418
3419 int rc = 0;
3420
3421 if (!phba->cfg_fof)
3422 return -EPERM;
3423
3424 if (oas_state) {
3425 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3426 (struct lpfc_name *)tgt_wwpn,
3427 lun, pri))
3428 rc = -ENOMEM;
3429 } else {
3430 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3431 (struct lpfc_name *)tgt_wwpn, lun, pri);
3432 }
3433 return rc;
3434
3435 }
3436
3437 /**
3438 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3439 * Access Storage (OAS) operations.
3440 * @phba: lpfc_hba pointer.
3441 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3442 * @tgt_wwpn: wwpn of the target associated with the returned lun
3443  * @lun_status: status of the returned lun
3444  * @lun_pri: priority of the returned lun
3445 *
3446 * Returns the first or next lun enabled for OAS operations for the vport/target
3447  * specified. If a lun is found, its vport wwpn, target wwpn and status are
3448 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3449 *
3450 * Return:
3451 * lun that is OAS enabled for the vport/target
3452 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3453 */
3454 static uint64_t
3455 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3456 uint8_t tgt_wwpn[], uint32_t *lun_status,
3457 uint32_t *lun_pri)
3458 {
3459 uint64_t found_lun;
3460
3461 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3462 return NOT_OAS_ENABLED_LUN;
3463 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3464 phba->sli4_hba.oas_next_vpt_wwpn,
3465 (struct lpfc_name *)
3466 phba->sli4_hba.oas_next_tgt_wwpn,
3467 &phba->sli4_hba.oas_next_lun,
3468 (struct lpfc_name *)vpt_wwpn,
3469 (struct lpfc_name *)tgt_wwpn,
3470 &found_lun, lun_status, lun_pri))
3471 return found_lun;
3472 else
3473 return NOT_OAS_ENABLED_LUN;
3474 }
3475
3476 /**
3477 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3478 * @phba: lpfc_hba pointer.
3479 * @vpt_wwpn: vport wwpn by reference.
3480 * @tgt_wwpn: target wwpn by reference.
3481 * @lun: the fc lun for setting oas state.
3482 * @oas_state: the oas state to be set to the oas_lun.
3483 * @pri: priority
3484 *
3485 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3486 * a lun for OAS operations.
3487 *
3488 * Return:
3489 * SUCCESS: 0
3490  * -ENOMEM: failed to enable a lun for OAS operations
3491 * -EPERM: OAS is not enabled
3492 */
3493 static ssize_t
3494 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3495 uint8_t tgt_wwpn[], uint64_t lun,
3496 uint32_t oas_state, uint8_t pri)
3497 {
3498
3499 int rc;
3500
3501 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3502 oas_state, pri);
3503 return rc;
3504 }
3505
3506 /**
3507 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3508 * @dev: class device that is converted into a Scsi_host.
3509 * @attr: device attribute, not used.
3510 * @buf: buffer for passing information.
3511 *
3512 * This routine returns a lun enabled for OAS each time the function
3513 * is called.
3514 *
3515 * Returns:
3516 * SUCCESS: size of formatted string.
3517 * -EFAULT: target or vport wwpn was not set properly.
3518 * -EPERM: oas is not enabled.
3519 **/
3520 static ssize_t
3521 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3522 char *buf)
3523 {
3524 struct Scsi_Host *shost = class_to_shost(dev);
3525 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3526
3527 uint64_t oas_lun;
3528 int len = 0;
3529
3530 if (!phba->cfg_fof)
3531 return -EPERM;
3532
3533 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3534 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3535 return -EFAULT;
3536
3537 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3538 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3539 return -EFAULT;
3540
3541 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3542 phba->cfg_oas_tgt_wwpn,
3543 &phba->cfg_oas_lun_status,
3544 &phba->cfg_oas_priority);
3545 if (oas_lun != NOT_OAS_ENABLED_LUN)
3546 phba->cfg_oas_flags |= OAS_LUN_VALID;
3547
3548 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3549
3550 return len;
3551 }
3552
3553 /**
3554 * lpfc_oas_lun_store - Sets the OAS state for lun
3555 * @dev: class device that is converted into a Scsi_host.
3556 * @attr: device attribute, not used.
3557 * @buf: buffer for passing information.
3558  * @count: size of the data buffer.
3559 *
3560 * This function sets the OAS state for lun. Before this function is called,
3561 * the vport wwpn, target wwpn, and oas state need to be set.
3562 *
3563  * Returns:
3564  * SUCCESS: value of count.
3565  * -EFAULT: target or vport wwpn was not set properly.
3566  * -EPERM: oas is not enabled.
3567  * -EINVAL: the lun value is not a valid hex number.
3568 **/
3569 static ssize_t
3570 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3571 const char *buf, size_t count)
3572 {
3573 struct Scsi_Host *shost = class_to_shost(dev);
3574 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3575 uint64_t scsi_lun;
3576 uint32_t pri;
3577 ssize_t rc;
3578
3579 if (!phba->cfg_fof)
3580 return -EPERM;
3581
3582 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3583 return -EFAULT;
3584
3585 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3586 return -EFAULT;
3587
3588 if (!isdigit(buf[0]))
3589 return -EINVAL;
3590
3591 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3592 return -EINVAL;
3593
3594 pri = phba->cfg_oas_priority;
3595 if (pri == 0)
3596 pri = phba->cfg_XLanePriority;
3597
3598 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3599 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3600 "priority 0x%x with oas state %d\n",
3601 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3602 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3603 pri, phba->cfg_oas_lun_state);
3604
3605 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3606 phba->cfg_oas_tgt_wwpn, scsi_lun,
3607 phba->cfg_oas_lun_state, pri);
3608 if (rc)
3609 return rc;
3610
3611 return count;
3612 }
3613 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3614 lpfc_oas_lun_show, lpfc_oas_lun_store);
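
/*
 * Illustrative OAS configuration flow (editorial example, not driver code;
 * the host number, WWPNs and LUN below are placeholders). The vport and
 * target WWPNs must be set before lpfc_xlane_lun is written, and the
 * lpfc_xlane_tgt attribute is assumed to be defined elsewhere in this file:
 *
 *   echo 10000090fa000001 > /sys/class/scsi_host/hostN/lpfc_xlane_vpt
 *   echo 20000090fa000002 > /sys/class/scsi_host/hostN/lpfc_xlane_tgt
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
 *   echo 0x0 > /sys/class/scsi_host/hostN/lpfc_xlane_lun
 */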
3615
3616 int lpfc_enable_nvmet_cnt;
3617 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3618 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3619 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3620 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3621 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
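
/*
 * Example (illustrative only; the WWPNs are placeholders): enable NVME
 * Target mode on two local ports by listing their WWPNs at load time:
 *
 *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779,0x10000090fa94277a
 *
 * Up to LPFC_NVMET_MAX_PORTS entries are accepted.
 */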
3622
3623 static int lpfc_poll = 0;
3624 module_param(lpfc_poll, int, S_IRUGO);
3625 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3626 " 0 - none,"
3627 " 1 - poll with interrupts enabled"
3628 " 3 - poll and disable FCP ring interrupts");
3629
3630 static DEVICE_ATTR_RW(lpfc_poll);
3631
3632 int lpfc_no_hba_reset_cnt;
3633 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3634 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3635 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3636 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
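
/*
 * Example (illustrative only; the WWPN is a placeholder): exclude one HBA
 * from reset at module load time:
 *
 *   modprobe lpfc lpfc_no_hba_reset=0x10000090fa942779
 *
 * Up to MAX_HBAS_NO_RESET entries are accepted.
 */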
3637
3638 LPFC_ATTR(sli_mode, 3, 3, 3,
3639 "SLI mode selector: 3 - select SLI-3");
3640
3641 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3642 "Enable NPIV functionality");
3643
3644 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3645 "FCF Fast failover=1 Priority failover=2");
3646
3647 /*
3648 * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
3649 * aborted IO.
3650 * The range is [0,1]. Default value is 0
3651 * 0, IO completes after ABTS issued (default).
3652 * 1, IO completes after receipt of ABTS response or timeout.
3653 */
3654 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
3655
3656 /*
3657 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3658 # 0x0 = disabled, XRI/OXID use not tracked.
3659 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3660 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3661 */
3662 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3663 "Enable RRQ functionality");
3664
3665 /*
3666 # lpfc_suppress_link_up: Bring link up at initialization
3667 # 0x0 = bring link up (issue MBX_INIT_LINK)
3668 # 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
3669 # 0x2 = never bring up link
3670 # Default value is 0.
3671 */
3672 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3673 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3674 "Suppress Link Up at initialization");
3675
3676 static ssize_t
3677 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3678 {
3679 struct Scsi_Host *shost = class_to_shost(dev);
3680 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3681
3682 return scnprintf(buf, PAGE_SIZE, "%d\n",
3683 phba->sli4_hba.pc_sli4_params.pls);
3684 }
3685 static DEVICE_ATTR(pls, 0444,
3686 lpfc_pls_show, NULL);
3687
3688 static ssize_t
3689 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3690 {
3691 struct Scsi_Host *shost = class_to_shost(dev);
3692 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3693
3694 return scnprintf(buf, PAGE_SIZE, "%d\n",
3695 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3696 }
3697 static DEVICE_ATTR(pt, 0444,
3698 lpfc_pt_show, NULL);
3699
3700 /*
3701 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3702 # 1 - (1024)
3703 # 2 - (2048)
3704 # 3 - (3072)
3705 # 4 - (4096)
3706 # 5 - (5120)
3707 */
3708 static ssize_t
3709 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3710 {
3711 struct Scsi_Host *shost = class_to_shost(dev);
3712 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3713
3714 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3715 }
3716
3717 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3718 lpfc_iocb_hw_show, NULL);
3719 static ssize_t
3720 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3721 {
3722 struct Scsi_Host *shost = class_to_shost(dev);
3723 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3724 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3725
3726 return scnprintf(buf, PAGE_SIZE, "%d\n",
3727 pring ? pring->txq_max : 0);
3728 }
3729
3730 static DEVICE_ATTR(txq_hw, S_IRUGO,
3731 lpfc_txq_hw_show, NULL);
3732 static ssize_t
3733 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3734 char *buf)
3735 {
3736 struct Scsi_Host *shost = class_to_shost(dev);
3737 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3738 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3739
3740 return scnprintf(buf, PAGE_SIZE, "%d\n",
3741 pring ? pring->txcmplq_max : 0);
3742 }
3743
3744 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3745 lpfc_txcmplq_hw_show, NULL);
3746
3747 /*
3748 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3749 # until the timer expires. Value range is [0,255]. Default value is 30.
3750 */
3751 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3752 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3753 module_param(lpfc_nodev_tmo, int, 0);
3754 MODULE_PARM_DESC(lpfc_nodev_tmo,
3755 "Seconds driver will hold I/O waiting "
3756 "for a device to come back");
3757
3758 /**
3759 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3760 * @dev: class converted to a Scsi_host structure.
3761 * @attr: device attribute, not used.
3762 * @buf: on return contains the dev loss timeout in decimal.
3763 *
3764 * Returns: size of formatted string.
3765 **/
3766 static ssize_t
3767 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3768 char *buf)
3769 {
3770 struct Scsi_Host *shost = class_to_shost(dev);
3771 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3772
3773 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3774 }
3775
3776 /**
3777 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3778 * @vport: lpfc vport structure pointer.
3779 * @val: contains the nodev timeout value.
3780 *
3781 * Description:
3782  * If the devloss tmo is already set then nodev tmo is set to devloss tmo
3783  * (a kernel error message is printed if val differs from the default) and
3784  * zero is returned. Else if val is in range then nodev tmo and devloss
3785  * tmo are set to val. Otherwise nodev tmo is set to the default value.
3786 *
3787 * Returns:
3788 * zero if already set or if val is in range
3789 * -EINVAL val out of range
3790 **/
3791 static int
3792 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3793 {
3794 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3795 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3796 if (val != LPFC_DEF_DEVLOSS_TMO)
3797 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3798 "0407 Ignoring lpfc_nodev_tmo module "
3799 "parameter because lpfc_devloss_tmo "
3800 "is set.\n");
3801 return 0;
3802 }
3803
3804 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3805 vport->cfg_nodev_tmo = val;
3806 vport->cfg_devloss_tmo = val;
3807 return 0;
3808 }
3809 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3810 "0400 lpfc_nodev_tmo attribute cannot be set to"
3811 " %d, allowed range is [%d, %d]\n",
3812 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3813 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3814 return -EINVAL;
3815 }
3816
3817 /**
3818 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3819 * @vport: lpfc vport structure pointer.
3820 *
3821 * Description:
3822 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3823 **/
3824 static void
3825 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3826 {
3827 struct Scsi_Host *shost;
3828 struct lpfc_nodelist *ndlp;
3829 #if (IS_ENABLED(CONFIG_NVME_FC))
3830 struct lpfc_nvme_rport *rport;
3831 struct nvme_fc_remote_port *remoteport = NULL;
3832 #endif
3833
3834 shost = lpfc_shost_from_vport(vport);
3835 spin_lock_irq(shost->host_lock);
3836 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3837 if (ndlp->rport)
3838 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3839 #if (IS_ENABLED(CONFIG_NVME_FC))
3840 spin_lock(&ndlp->lock);
3841 rport = lpfc_ndlp_get_nrport(ndlp);
3842 if (rport)
3843 remoteport = rport->remoteport;
3844 spin_unlock(&ndlp->lock);
3845 if (rport && remoteport)
3846 nvme_fc_set_remoteport_devloss(remoteport,
3847 vport->cfg_devloss_tmo);
3848 #endif
3849 }
3850 spin_unlock_irq(shost->host_lock);
3851 }
3852
3853 /**
3854 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3855 * @vport: lpfc vport structure pointer.
3856 * @val: contains the tmo value.
3857 *
3858 * Description:
3859 * If the devloss tmo is already set or the vport dev loss tmo has changed
3860 * then a kernel error message is printed and zero is returned.
3861 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3862 * Otherwise nodev tmo is set to the default value.
3863 *
3864 * Returns:
3865 * zero if already set or if val is in range
3866 * -EINVAL val out of range
3867 **/
3868 static int
3869 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3870 {
3871 if (vport->dev_loss_tmo_changed ||
3872 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3873 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3874 "0401 Ignoring change to lpfc_nodev_tmo "
3875 "because lpfc_devloss_tmo is set.\n");
3876 return 0;
3877 }
3878 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3879 vport->cfg_nodev_tmo = val;
3880 vport->cfg_devloss_tmo = val;
3881 /*
3882 * For compat: set the fc_host dev loss so new rports
3883 * will get the value.
3884 */
3885 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3886 lpfc_update_rport_devloss_tmo(vport);
3887 return 0;
3888 }
3889 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3890 "0403 lpfc_nodev_tmo attribute cannot be set to "
3891 "%d, allowed range is [%d, %d]\n",
3892 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3893 return -EINVAL;
3894 }
3895
3896 lpfc_vport_param_store(nodev_tmo)
3897
3898 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3899
3900 /*
3901 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3902 # disappear until the timer expires. Value range is [0,255]. Default
3903 # value is 30.
3904 */
3905 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3906 MODULE_PARM_DESC(lpfc_devloss_tmo,
3907 "Seconds driver will hold I/O waiting "
3908 "for a device to come back");
3909 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3910 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3911 lpfc_vport_param_show(devloss_tmo)
3912
3913 /**
3914 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3915 * @vport: lpfc vport structure pointer.
3916 * @val: contains the tmo value.
3917 *
3918 * Description:
3919 * If val is in a valid range then set the vport nodev tmo,
3920 * devloss tmo, also set the vport dev loss tmo changed flag.
3921 * Else a kernel error message is printed.
3922 *
3923 * Returns:
3924 * zero if val is in range
3925 * -EINVAL val out of range
3926 **/
3927 static int
3928 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3929 {
3930 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3931 vport->cfg_nodev_tmo = val;
3932 vport->cfg_devloss_tmo = val;
3933 vport->dev_loss_tmo_changed = 1;
3934 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3935 lpfc_update_rport_devloss_tmo(vport);
3936 return 0;
3937 }
3938
3939 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3940 "0404 lpfc_devloss_tmo attribute cannot be set to "
3941 "%d, allowed range is [%d, %d]\n",
3942 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3943 return -EINVAL;
3944 }
3945
3946 lpfc_vport_param_store(devloss_tmo)
3947 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
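
/*
 * Illustrative runtime usage (host number and value are examples only):
 * extend the device loss window to 60 seconds; writing lpfc_devloss_tmo
 * also marks the value as explicitly changed, so later lpfc_nodev_tmo
 * writes are ignored:
 *
 *   echo 60 > /sys/class/scsi_host/hostN/lpfc_devloss_tmo
 */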
3948
3949 /*
3950  * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3951 * lpfc_suppress_rsp = 0 Disable
3952 * lpfc_suppress_rsp = 1 Enable (default)
3953 *
3954 */
3955 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3956 "Enable suppress rsp feature is firmware supports it");
3957
3958 /*
3959 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3960  * lpfc_nvmet_mrq = 0  driver will calculate optimal number of RQ pairs
3961 * lpfc_nvmet_mrq = 1 use a single RQ pair
3962 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3963 *
3964 */
3965 LPFC_ATTR_R(nvmet_mrq,
3966 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3967 "Specify number of RQ pairs for processing NVMET cmds");
3968
3969 /*
3970 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3971 * to each NVMET RQ. Range 64 to 2048, default is 512.
3972 */
3973 LPFC_ATTR_R(nvmet_mrq_post,
3974 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3975 LPFC_NVMET_RQE_DEF_COUNT,
3976 "Specify number of RQ buffers to initially post");
3977
3978 /*
3979 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3980 * Supported Values: 1 - register just FCP
3981 * 3 - register both FCP and NVME
3982 * Supported values are [1,3]. Default value is 3
3983 */
3984 LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
3985 LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
3986 "Enable FC4 Protocol support - FCP / NVME");
3987
3988 /*
3989 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3990 # deluged with LOTS of information.
3991 # You can set a bit mask to record specific types of verbose messages:
3992 # See lpfc_logmsg.h for definitions.
3993 */
3994 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3995 "Verbose logging bit-mask");
3996
3997 /*
3998 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3999 # objects that have been registered with the nameserver after login.
4000 */
4001 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
4002 "Deregister nameserver objects before LOGO");
4003
4004 /*
4005 # lun_queue_depth: This parameter is used to limit the number of outstanding
4006 # commands per FCP LUN.
4007 */
4008 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
4009 "Max number of FCP commands we can queue to a specific LUN");
4010
4011 /*
4012 # tgt_queue_depth: This parameter is used to limit the number of outstanding
4013 # commands per target port. Value range is [10,65535]. Default value is 65535.
4014 */
4015 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
4016 module_param(lpfc_tgt_queue_depth, uint, 0444);
4017 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
4018 lpfc_vport_param_show(tgt_queue_depth);
4019 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
4020 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
4021
4022 /**
4023 * lpfc_tgt_queue_depth_set: Sets an attribute value.
4024 * @vport: lpfc vport structure pointer.
4025 * @val: integer attribute value.
4026 *
4027 * Description: Sets the parameter to the new value.
4028 *
4029 * Returns:
4030 * zero on success
4031 * -EINVAL if val is invalid
4032 */
4033 static int
4034 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
4035 {
4036 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4037 struct lpfc_nodelist *ndlp;
4038
4039 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
4040 return -EINVAL;
4041
4042 if (val == vport->cfg_tgt_queue_depth)
4043 return 0;
4044
4045 spin_lock_irq(shost->host_lock);
4046 vport->cfg_tgt_queue_depth = val;
4047
4048 /* Next loop thru nodelist and change cmd_qdepth */
4049 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
4050 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4051
4052 spin_unlock_irq(shost->host_lock);
4053 return 0;
4054 }
4055
4056 lpfc_vport_param_store(tgt_queue_depth);
4057 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
4058
4059 /*
4060 # hba_queue_depth: This parameter is used to limit the number of outstanding
4061 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
4062 # value is greater than the maximum number of exchanges supported by the HBA,
4063 # then maximum number of exchanges supported by the HBA is used to determine
4064 # the hba_queue_depth.
4065 */
4066 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
4067 "Max number of FCP commands we can queue to a lpfc HBA");
4068
4069 /*
4070 # peer_port_login: This parameter allows/prevents logins
4071 # between peer ports hosted on the same physical port.
4072 # When this parameter is set 0 peer ports of same physical port
4073 # are not allowed to login to each other.
4074 # When this parameter is set 1 peer ports of same physical port
4075 # are allowed to login to each other.
4076 # Default value of this parameter is 0.
4077 */
4078 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
4079 "Allow peer ports on the same physical port to login to each "
4080 "other.");
4081
4082 /*
4083 # restrict_login: This parameter allows/prevents logins
4084 # between Virtual Ports and remote initiators.
4085 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
4086 # other initiators and will attempt to PLOGI all remote ports.
4087 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
4088 # remote ports and will not attempt to PLOGI to other initiators.
4089 # This parameter does not restrict to the physical port.
4090 # This parameter does not restrict logins to Fabric resident remote ports.
4091 # Default value of this parameter is 1.
4092 */
4093 static int lpfc_restrict_login = 1;
4094 module_param(lpfc_restrict_login, int, S_IRUGO);
4095 MODULE_PARM_DESC(lpfc_restrict_login,
4096 "Restrict virtual ports login to remote initiators.");
4097 lpfc_vport_param_show(restrict_login);
4098
4099 /**
4100 * lpfc_restrict_login_init - Set the vport restrict login flag
4101 * @vport: lpfc vport structure pointer.
4102 * @val: contains the restrict login value.
4103 *
4104 * Description:
4105 * If val is not in a valid range then log a kernel error message and set
4106 * the vport restrict login to one.
4107 * If the port type is physical clear the restrict login flag and return.
4108 * Else set the restrict login flag to val.
4109 *
4110 * Returns:
4111 * zero if val is in range
4112 * -EINVAL val out of range
4113 **/
4114 static int
4115 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
4116 {
4117 if (val < 0 || val > 1) {
4118 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4119 "0422 lpfc_restrict_login attribute cannot "
4120 "be set to %d, allowed range is [0, 1]\n",
4121 val);
4122 vport->cfg_restrict_login = 1;
4123 return -EINVAL;
4124 }
4125 if (vport->port_type == LPFC_PHYSICAL_PORT) {
4126 vport->cfg_restrict_login = 0;
4127 return 0;
4128 }
4129 vport->cfg_restrict_login = val;
4130 return 0;
4131 }
4132
4133 /**
4134 * lpfc_restrict_login_set - Set the vport restrict login flag
4135 * @vport: lpfc vport structure pointer.
4136 * @val: contains the restrict login value.
4137 *
4138 * Description:
4139 * If val is not in a valid range then log a kernel error message and set
4140 * the vport restrict login to one.
4141 * If the port type is physical and the val is not zero log a kernel
4142 * error message, clear the restrict login flag and return zero.
4143 * Else set the restrict login flag to val.
4144 *
4145 * Returns:
4146 * zero if val is in range
4147 * -EINVAL val out of range
4148 **/
4149 static int
4150 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
4151 {
4152 if (val < 0 || val > 1) {
4153 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4154 "0425 lpfc_restrict_login attribute cannot "
4155 "be set to %d, allowed range is [0, 1]\n",
4156 val);
4157 vport->cfg_restrict_login = 1;
4158 return -EINVAL;
4159 }
4160 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
4161 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4162 "0468 lpfc_restrict_login must be 0 for "
4163 "Physical ports.\n");
4164 vport->cfg_restrict_login = 0;
4165 return 0;
4166 }
4167 vport->cfg_restrict_login = val;
4168 return 0;
4169 }
4170 lpfc_vport_param_store(restrict_login);
4171 static DEVICE_ATTR_RW(lpfc_restrict_login);
4172
4173 /*
4174 # Some disk devices have a "select ID" or "select Target" capability.
4175 # From a protocol standpoint "select ID" usually means select the
4176 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative
4177 # annex" which contains a table that maps a "select ID" (a number
4178 # between 0 and 7F) to an ALPA. By default, for compatibility with
4179 # older drivers, the lpfc driver scans this table from low ALPA to high
4180 # ALPA.
4181 #
4182 # Turning on the scan-down variable (on = 1, off = 0) will
4183 # cause the lpfc driver to use an inverted table, effectively
4184 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
4185 #
4186 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
4187 # and will not work across a fabric. Also this parameter will take
4188 # effect only in the case when ALPA map is not available.)
4189 */
4190 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
4191 "Start scanning for devices from highest ALPA to lowest");
4192
4193 /*
4194 # lpfc_topology: link topology for init link
4195 # 0x0 = attempt loop mode then point-to-point
4196 # 0x01 = internal loopback mode
4197 # 0x02 = attempt point-to-point mode only
4198 # 0x04 = attempt loop mode only
4199 # 0x06 = attempt point-to-point mode then loop
4200 # Set point-to-point mode if you want to run as an N_Port.
4201 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4202 # Default value is 0.
4203 */
4204 LPFC_ATTR(topology, 0, 0, 6,
4205 "Select Fibre Channel topology");
4206
4207 /**
4208 * lpfc_topology_store - Set the adapters topology field
4209 * @dev: class device that is converted into a scsi_host.
4210  * @attr: device attribute, not used.
4211 * @buf: buffer for passing information.
4212 * @count: size of the data buffer.
4213 *
4214 * Description:
4215 * If val is in a valid range then set the adapter's topology field and
4216 * issue a lip; if the lip fails reset the topology to the old value.
4217 *
4218 * If the value is not in range log a kernel error message and return an error.
4219 *
4220 * Returns:
4221 * zero if val is in range and lip okay
4222 * non-zero return value from lpfc_issue_lip()
4223 * -EINVAL val out of range
4224 **/
4225 static ssize_t
4226 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4227 const char *buf, size_t count)
4228 {
4229 struct Scsi_Host *shost = class_to_shost(dev);
4230 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4231 struct lpfc_hba *phba = vport->phba;
4232 int val = 0;
4233 int nolip = 0;
4234 const char *val_buf = buf;
4235 int err;
4236 uint32_t prev_val;
4237 u8 sli_family, if_type;
4238
4239 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4240 nolip = 1;
4241 val_buf = &buf[strlen("nolip ")];
4242 }
4243
4244 if (!isdigit(val_buf[0]))
4245 return -EINVAL;
4246 if (sscanf(val_buf, "%i", &val) != 1)
4247 return -EINVAL;
4248
4249 if (val >= 0 && val <= 6) {
4250 prev_val = phba->cfg_topology;
4251 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4252 val == 4) {
4253 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4254 "3113 Loop mode not supported at speed %d\n",
4255 val);
4256 return -EINVAL;
4257 }
4258 /*
4259 * The 'topology' is not a configurable parameter if :
4260 * - persistent topology enabled
4261 * - ASIC_GEN_NUM >= 0xC, with no private loop support
4262 */
4263 sli_family = bf_get(lpfc_sli_intf_sli_family,
4264 &phba->sli4_hba.sli_intf);
4265 if_type = bf_get(lpfc_sli_intf_if_type,
4266 &phba->sli4_hba.sli_intf);
4267 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4268 (!phba->sli4_hba.pc_sli4_params.pls &&
4269 (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
4270 if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
4271 val == 4) {
4272 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4273 "3114 Loop mode not supported\n");
4274 return -EINVAL;
4275 }
4276 phba->cfg_topology = val;
4277 if (nolip)
4278 return strlen(buf);
4279
4280 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4281 "3054 lpfc_topology changed from %d to %d\n",
4282 prev_val, val);
4283 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4284 phba->fc_topology_changed = 1;
4285 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4286 if (err) {
4287 phba->cfg_topology = prev_val;
4288 return -EINVAL;
4289 } else
4290 return strlen(buf);
4291 }
4292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4293 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4294 "allowed range is [0, 6]\n",
4295 phba->brd_no, val);
4296 return -EINVAL;
4297 }
4298
4299 lpfc_param_show(topology)
4300 static DEVICE_ATTR_RW(lpfc_topology);
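
/*
 * Illustrative usage (host number is a placeholder): select point-to-point
 * only topology and let the attendant LIP re-initialize the link, or prefix
 * the value with "nolip " to store it without issuing a LIP:
 *
 *   echo 2 > /sys/class/scsi_host/hostN/lpfc_topology
 *   echo "nolip 2" > /sys/class/scsi_host/hostN/lpfc_topology
 */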
4301
4302 /**
4303 * lpfc_static_vport_show: Read callback function for
4304 * lpfc_static_vport sysfs file.
4305 * @dev: Pointer to class device object.
4306 * @attr: device attribute structure.
4307 * @buf: Data buffer.
4308 *
4309 * This function is the read call back function for
4310 * lpfc_static_vport sysfs file. The lpfc_static_vport
4311  * sysfs file reports the manageability of the vport.
4312 **/
4313 static ssize_t
4314 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4315 char *buf)
4316 {
4317 struct Scsi_Host *shost = class_to_shost(dev);
4318 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4319 if (vport->vport_flag & STATIC_VPORT)
4320 sprintf(buf, "1\n");
4321 else
4322 sprintf(buf, "0\n");
4323
4324 return strlen(buf);
4325 }
4326
4327 /*
4328 * Sysfs attribute to control the statistical data collection.
4329 */
4330 static DEVICE_ATTR_RO(lpfc_static_vport);
4331
4332 /**
4333 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4334 * @dev: Pointer to class device.
4335 * @attr: Unused.
4336 * @buf: Data buffer.
4337 * @count: Size of the data buffer.
4338 *
4339  * This function gets called when a user writes to the lpfc_stat_data_ctrl
4340  * sysfs file. It parses the command written to the sysfs file
4341  * and takes the appropriate action. These commands are used for controlling
4342  * driver statistical data collection.
4343  * Following are the commands this function handles.
4344 *
4345 * setbucket <bucket_type> <base> <step>
4346 * = Set the latency buckets.
4347 * destroybucket = destroy all the buckets.
4348 * start = start data collection
4349 * stop = stop data collection
4350 * reset = reset the collected data
4351 **/
4352 static ssize_t
4353 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4354 const char *buf, size_t count)
4355 {
4356 struct Scsi_Host *shost = class_to_shost(dev);
4357 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4358 struct lpfc_hba *phba = vport->phba;
4359 #define LPFC_MAX_DATA_CTRL_LEN 1024
4360 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4361 unsigned long i;
4362 char *str_ptr, *token;
4363 struct lpfc_vport **vports;
4364 struct Scsi_Host *v_shost;
4365 char *bucket_type_str, *base_str, *step_str;
4366 unsigned long base, step, bucket_type;
4367
4368 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4369 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4370 return -EINVAL;
4371
4372 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4373 str_ptr = &bucket_data[0];
4374 /* Ignore this token - this is command token */
4375 token = strsep(&str_ptr, "\t ");
4376 if (!token)
4377 return -EINVAL;
4378
4379 bucket_type_str = strsep(&str_ptr, "\t ");
4380 if (!bucket_type_str)
4381 return -EINVAL;
4382
4383 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4384 bucket_type = LPFC_LINEAR_BUCKET;
4385 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4386 bucket_type = LPFC_POWER2_BUCKET;
4387 else
4388 return -EINVAL;
4389
4390 base_str = strsep(&str_ptr, "\t ");
4391 if (!base_str)
4392 return -EINVAL;
4393 base = simple_strtoul(base_str, NULL, 0);
4394
4395 step_str = strsep(&str_ptr, "\t ");
4396 if (!step_str)
4397 return -EINVAL;
4398 step = simple_strtoul(step_str, NULL, 0);
4399 if (!step)
4400 return -EINVAL;
4401
4402 /* Block the data collection for every vport */
4403 vports = lpfc_create_vport_work_array(phba);
4404 if (vports == NULL)
4405 return -ENOMEM;
4406
4407 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4408 v_shost = lpfc_shost_from_vport(vports[i]);
4409 spin_lock_irq(v_shost->host_lock);
4410 /* Block and reset data collection */
4411 vports[i]->stat_data_blocked = 1;
4412 if (vports[i]->stat_data_enabled)
4413 lpfc_vport_reset_stat_data(vports[i]);
4414 spin_unlock_irq(v_shost->host_lock);
4415 }
4416
4417 /* Set the bucket attributes */
4418 phba->bucket_type = bucket_type;
4419 phba->bucket_base = base;
4420 phba->bucket_step = step;
4421
4422 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4423 v_shost = lpfc_shost_from_vport(vports[i]);
4424
4425 /* Unblock data collection */
4426 spin_lock_irq(v_shost->host_lock);
4427 vports[i]->stat_data_blocked = 0;
4428 spin_unlock_irq(v_shost->host_lock);
4429 }
4430 lpfc_destroy_vport_work_array(phba, vports);
4431 return strlen(buf);
4432 }
4433
4434 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4435 vports = lpfc_create_vport_work_array(phba);
4436 if (vports == NULL)
4437 return -ENOMEM;
4438
4439 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4440 v_shost = lpfc_shost_from_vport(vports[i]);
4441 			spin_lock_irq(v_shost->host_lock);
4442 			vports[i]->stat_data_blocked = 1;
4443 			lpfc_free_bucket(vports[i]);
4444 			vports[i]->stat_data_enabled = 0;
4445 			vports[i]->stat_data_blocked = 0;
4446 			spin_unlock_irq(v_shost->host_lock);
4447 }
4448 lpfc_destroy_vport_work_array(phba, vports);
4449 phba->bucket_type = LPFC_NO_BUCKET;
4450 phba->bucket_base = 0;
4451 phba->bucket_step = 0;
4452 return strlen(buf);
4453 }
4454
4455 if (!strncmp(buf, "start", strlen("start"))) {
4456 /* If no buckets configured return error */
4457 if (phba->bucket_type == LPFC_NO_BUCKET)
4458 return -EINVAL;
4459 spin_lock_irq(shost->host_lock);
4460 if (vport->stat_data_enabled) {
4461 spin_unlock_irq(shost->host_lock);
4462 return strlen(buf);
4463 }
4464 lpfc_alloc_bucket(vport);
4465 vport->stat_data_enabled = 1;
4466 spin_unlock_irq(shost->host_lock);
4467 return strlen(buf);
4468 }
4469
4470 if (!strncmp(buf, "stop", strlen("stop"))) {
4471 spin_lock_irq(shost->host_lock);
4472 if (vport->stat_data_enabled == 0) {
4473 spin_unlock_irq(shost->host_lock);
4474 return strlen(buf);
4475 }
4476 lpfc_free_bucket(vport);
4477 vport->stat_data_enabled = 0;
4478 spin_unlock_irq(shost->host_lock);
4479 return strlen(buf);
4480 }
4481
4482 if (!strncmp(buf, "reset", strlen("reset"))) {
4483 if ((phba->bucket_type == LPFC_NO_BUCKET)
4484 || !vport->stat_data_enabled)
4485 return strlen(buf);
4486 spin_lock_irq(shost->host_lock);
4487 vport->stat_data_blocked = 1;
4488 lpfc_vport_reset_stat_data(vport);
4489 vport->stat_data_blocked = 0;
4490 spin_unlock_irq(shost->host_lock);
4491 return strlen(buf);
4492 }
4493 return -EINVAL;
4494 }
4495
4496
4497 /**
4498 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4499 * @dev: Pointer to class device.
4500 * @attr: Unused.
4501 * @buf: Data buffer.
4502 *
4503 * This function is the read call back function for
4504  * lpfc_stat_data_ctrl sysfs file. This function reports the
4505 * current statistical data collection state.
4506 **/
4507 static ssize_t
4508 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4509 char *buf)
4510 {
4511 struct Scsi_Host *shost = class_to_shost(dev);
4512 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4513 struct lpfc_hba *phba = vport->phba;
4514 int index = 0;
4515 int i;
4516 char *bucket_type;
4517 unsigned long bucket_value;
4518
4519 switch (phba->bucket_type) {
4520 case LPFC_LINEAR_BUCKET:
4521 bucket_type = "linear";
4522 break;
4523 case LPFC_POWER2_BUCKET:
4524 bucket_type = "power2";
4525 break;
4526 default:
4527 bucket_type = "No Bucket";
4528 break;
4529 }
4530
4531 sprintf(&buf[index], "Statistical Data enabled :%d, "
4532 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4533 " Bucket step :%d\nLatency Ranges :",
4534 vport->stat_data_enabled, vport->stat_data_blocked,
4535 bucket_type, phba->bucket_base, phba->bucket_step);
4536 index = strlen(buf);
4537 if (phba->bucket_type != LPFC_NO_BUCKET) {
4538 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4539 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4540 bucket_value = phba->bucket_base +
4541 phba->bucket_step * i;
4542 else
4543 bucket_value = phba->bucket_base +
4544 (1 << i) * phba->bucket_step;
4545
4546 if (index + 10 > PAGE_SIZE)
4547 break;
4548 sprintf(&buf[index], "%08ld ", bucket_value);
4549 index = strlen(buf);
4550 }
4551 }
4552 sprintf(&buf[index], "\n");
4553 return strlen(buf);
4554 }
4555
4556 /*
4557 * Sysfs attribute to control the statistical data collection.
4558 */
4559 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
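
/*
 * Illustrative lpfc_stat_data_ctrl command sequence (host number, base and
 * step values are examples only): configure linear latency buckets with
 * base 100 and step 100, start collection, and later reset the counters:
 *
 *   echo "setbucket linear 100 100" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   echo "start" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   echo "reset" > /sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 */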
4560
4561 /*
4562 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4563 */
4564
4565 /*
4566  * Each bucket takes 11 characters, plus 17 bytes of WWN and 1 newline
4567  * for each target.
4568 */
4569 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4570 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4571 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4572
4573
4574 /**
4575 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4576 * @filp: sysfs file
4577 * @kobj: Pointer to the kernel object
4578 * @bin_attr: Attribute object
4579 * @buf: Buffer pointer
4580 * @off: File offset
4581 * @count: Buffer size
4582 *
4583 * This function is the read call back function for lpfc_drvr_stat_data
4584  * sysfs file. This function exports the statistical data to user
4585 * applications.
4586 **/
4587 static ssize_t
4588 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4589 struct bin_attribute *bin_attr,
4590 char *buf, loff_t off, size_t count)
4591 {
4592 struct device *dev = container_of(kobj, struct device,
4593 kobj);
4594 struct Scsi_Host *shost = class_to_shost(dev);
4595 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4596 struct lpfc_hba *phba = vport->phba;
4597 int i = 0, index = 0;
4598 unsigned long nport_index;
4599 struct lpfc_nodelist *ndlp = NULL;
4600 nport_index = (unsigned long)off /
4601 MAX_STAT_DATA_SIZE_PER_TARGET;
4602
4603 if (!vport->stat_data_enabled || vport->stat_data_blocked
4604 || (phba->bucket_type == LPFC_NO_BUCKET))
4605 return 0;
4606
4607 spin_lock_irq(shost->host_lock);
4608 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4609 if (!ndlp->lat_data)
4610 continue;
4611
4612 if (nport_index > 0) {
4613 nport_index--;
4614 continue;
4615 }
4616
4617 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4618 > count)
4619 break;
4620
4621 if (!ndlp->lat_data)
4622 continue;
4623
4624 /* Print the WWN */
4625 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4626 ndlp->nlp_portname.u.wwn[0],
4627 ndlp->nlp_portname.u.wwn[1],
4628 ndlp->nlp_portname.u.wwn[2],
4629 ndlp->nlp_portname.u.wwn[3],
4630 ndlp->nlp_portname.u.wwn[4],
4631 ndlp->nlp_portname.u.wwn[5],
4632 ndlp->nlp_portname.u.wwn[6],
4633 ndlp->nlp_portname.u.wwn[7]);
4634
4635 index = strlen(buf);
4636
4637 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4638 sprintf(&buf[index], "%010u,",
4639 ndlp->lat_data[i].cmd_count);
4640 index = strlen(buf);
4641 }
4642 sprintf(&buf[index], "\n");
4643 index = strlen(buf);
4644 }
4645 spin_unlock_irq(shost->host_lock);
4646 return index;
4647 }
4648
4649 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4650 .attr = {
4651 .name = "lpfc_drvr_stat_data",
4652 .mode = S_IRUSR,
4653 },
4654 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4655 .read = sysfs_drvr_stat_data_read,
4656 .write = NULL,
4657 };
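
/*
 * Descriptive note (inferred from sysfs_drvr_stat_data_read() above): each
 * read of lpfc_drvr_stat_data returns one line per discovered target of the
 * form "<wwpn>:<count>,<count>,...", i.e. the remote port WWPN followed by
 * the per-bucket command counts.
 */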
4658
4659 /*
4660 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4661 # connection.
4662 # Supported values are 0 (auto) or a discrete speed supported by the adapter. Default value is 0.
4663 */
4664 /**
4665 * lpfc_link_speed_store - Set the adapters link speed
4666 * @dev: Pointer to class device.
4667 * @attr: Unused.
4668 * @buf: Data buffer.
4669 * @count: Size of the data buffer.
4670 *
4671 * Description:
4672 * If val is in a valid range then set the adapter's link speed field and
4673 * issue a lip; if the lip fails reset the link speed to the old value.
4674 *
4675 * Notes:
4676 * If the value is not in range log a kernel error message and return an error.
4677 *
4678 * Returns:
4679 * zero if val is in range and lip okay.
4680 * non-zero return value from lpfc_issue_lip()
4681 * -EINVAL val out of range
4682 **/
4683 static ssize_t
4684 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4685 const char *buf, size_t count)
4686 {
4687 struct Scsi_Host *shost = class_to_shost(dev);
4688 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4689 struct lpfc_hba *phba = vport->phba;
4690 int val = LPFC_USER_LINK_SPEED_AUTO;
4691 int nolip = 0;
4692 const char *val_buf = buf;
4693 int err;
4694 uint32_t prev_val, if_type;
4695
4696 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4697 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4698 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4699 return -EPERM;
4700
4701 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4702 nolip = 1;
4703 val_buf = &buf[strlen("nolip ")];
4704 }
4705
4706 if (!isdigit(val_buf[0]))
4707 return -EINVAL;
4708 if (sscanf(val_buf, "%i", &val) != 1)
4709 return -EINVAL;
4710
4711 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4712 "3055 lpfc_link_speed changed from %d to %d %s\n",
4713 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4714
4715 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4716 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4717 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4718 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4719 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4720 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4721 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4722 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4724 "2879 lpfc_link_speed attribute cannot be set "
4725 "to %d. Speed is not supported by this port.\n",
4726 val);
4727 return -EINVAL;
4728 }
4729 if (val >= LPFC_USER_LINK_SPEED_16G &&
4730 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4732 "3112 lpfc_link_speed attribute cannot be set "
4733 "to %d. Speed is not supported in loop mode.\n",
4734 val);
4735 return -EINVAL;
4736 }
4737
4738 switch (val) {
4739 case LPFC_USER_LINK_SPEED_AUTO:
4740 case LPFC_USER_LINK_SPEED_1G:
4741 case LPFC_USER_LINK_SPEED_2G:
4742 case LPFC_USER_LINK_SPEED_4G:
4743 case LPFC_USER_LINK_SPEED_8G:
4744 case LPFC_USER_LINK_SPEED_16G:
4745 case LPFC_USER_LINK_SPEED_32G:
4746 case LPFC_USER_LINK_SPEED_64G:
4747 prev_val = phba->cfg_link_speed;
4748 phba->cfg_link_speed = val;
4749 if (nolip)
4750 return strlen(buf);
4751
4752 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4753 if (err) {
4754 phba->cfg_link_speed = prev_val;
4755 return -EINVAL;
4756 }
4757 return strlen(buf);
4758 default:
4759 break;
4760 }
4761
4762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4763 "0469 lpfc_link_speed attribute cannot be set to %d, "
4764 "allowed values are [%s]\n",
4765 val, LPFC_LINK_SPEED_STRING);
4766 return -EINVAL;
4767
4768 }
4769
4770 static int lpfc_link_speed = 0;
4771 module_param(lpfc_link_speed, int, S_IRUGO);
4772 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4773 lpfc_param_show(link_speed)
4774
4775 /**
4776 * lpfc_link_speed_init - Set the adapters link speed
4777 * @phba: lpfc_hba pointer.
4778 * @val: link speed value.
4779 *
4780 * Description:
4781 * If val is in a valid range then set the adapter's link speed field.
4782 *
4783 * Notes:
4784 * If the value is not in range log a kernel error message, clear the link
4785 * speed and return an error.
4786 *
4787 * Returns:
4788 * zero if val saved.
4789 * -EINVAL val out of range
4790 **/
4791 static int
4792 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4793 {
4794 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4795 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4796 "3111 lpfc_link_speed of %d cannot "
4797 "support loop mode, setting topology to default.\n",
4798 val);
4799 phba->cfg_topology = 0;
4800 }
4801
4802 switch (val) {
4803 case LPFC_USER_LINK_SPEED_AUTO:
4804 case LPFC_USER_LINK_SPEED_1G:
4805 case LPFC_USER_LINK_SPEED_2G:
4806 case LPFC_USER_LINK_SPEED_4G:
4807 case LPFC_USER_LINK_SPEED_8G:
4808 case LPFC_USER_LINK_SPEED_16G:
4809 case LPFC_USER_LINK_SPEED_32G:
4810 case LPFC_USER_LINK_SPEED_64G:
4811 phba->cfg_link_speed = val;
4812 return 0;
4813 default:
4814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4815 "0405 lpfc_link_speed attribute cannot "
4816 "be set to %d, allowed values are "
4817 "["LPFC_LINK_SPEED_STRING"]\n", val);
4818 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4819 return -EINVAL;
4820 }
4821 }
4822
4823 static DEVICE_ATTR_RW(lpfc_link_speed);
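
/*
 * Illustrative usage (host number and speed are placeholders): force the
 * link to 16 Gb and issue a LIP, or use the "nolip " prefix to store the
 * speed without resetting the link:
 *
 *   echo 16 > /sys/class/scsi_host/hostN/lpfc_link_speed
 *   echo "nolip 16" > /sys/class/scsi_host/hostN/lpfc_link_speed
 */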
4824
4825 /*
4826 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4827 # 0 = aer disabled or not supported
4828 # 1 = aer supported and enabled (default)
4829 # Value range is [0,1]. Default value is 1.
4830 */
4831 LPFC_ATTR(aer_support, 1, 0, 1,
4832 "Enable PCIe device AER support");
4833 lpfc_param_show(aer_support)
4834
4835 /**
4836 * lpfc_aer_support_store - Set the adapter for aer support
4837 *
4838 * @dev: class device that is converted into a Scsi_host.
4839 * @attr: device attribute, not used.
4840 * @buf: containing enable or disable aer flag.
4841 * @count: unused variable.
4842 *
4843 * Description:
4844 * If the val is 1 and currently the device's AER capability was not
4845 * enabled, invoke the kernel's enable AER helper routine, trying to
4846 * enable the device's AER capability. If the helper routine enabling
4847 * AER returns success, update the device's cfg_aer_support flag to
4848 * indicate AER is supported by the device; otherwise, if the device
4849 * AER capability is already enabled to support AER, then do nothing.
4850 *
4851 * If the val is 0 and currently the device's AER support was enabled,
4852 * invoke the kernel's disable AER helper routine. After that, update
4853 * the device's cfg_aer_support flag to indicate AER is not supported
4854 * by the device; otherwise, if the device AER capability is already
4855 * disabled from supporting AER, then do nothing.
4856 *
4857 * Returns:
4858  * length of the buf on success if val is in range and the intended mode
4859 * is supported.
4860 * -EINVAL if val out of range or intended mode is not supported.
4861 **/
4862 static ssize_t
4863 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4864 const char *buf, size_t count)
4865 {
4866 struct Scsi_Host *shost = class_to_shost(dev);
4867 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4868 struct lpfc_hba *phba = vport->phba;
4869 int val = 0, rc = -EINVAL;
4870
4871 if (!isdigit(buf[0]))
4872 return -EINVAL;
4873 if (sscanf(buf, "%i", &val) != 1)
4874 return -EINVAL;
4875
4876 switch (val) {
4877 case 0:
4878 if (phba->hba_flag & HBA_AER_ENABLED) {
4879 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4880 if (!rc) {
4881 spin_lock_irq(&phba->hbalock);
4882 phba->hba_flag &= ~HBA_AER_ENABLED;
4883 spin_unlock_irq(&phba->hbalock);
4884 phba->cfg_aer_support = 0;
4885 rc = strlen(buf);
4886 } else
4887 rc = -EPERM;
4888 } else {
4889 phba->cfg_aer_support = 0;
4890 rc = strlen(buf);
4891 }
4892 break;
4893 case 1:
4894 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4895 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4896 if (!rc) {
4897 spin_lock_irq(&phba->hbalock);
4898 phba->hba_flag |= HBA_AER_ENABLED;
4899 spin_unlock_irq(&phba->hbalock);
4900 phba->cfg_aer_support = 1;
4901 rc = strlen(buf);
4902 } else
4903 rc = -EPERM;
4904 } else {
4905 phba->cfg_aer_support = 1;
4906 rc = strlen(buf);
4907 }
4908 break;
4909 default:
4910 rc = -EINVAL;
4911 break;
4912 }
4913 return rc;
4914 }
4915
4916 static DEVICE_ATTR_RW(lpfc_aer_support);
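
/*
 * Illustrative usage (host number is a placeholder): writing 1 attempts to
 * enable PCIe AER reporting on the adapter, writing 0 disables it:
 *
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_support
 */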
4917
4918 /**
4919 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4920 * @dev: class device that is converted into a Scsi_host.
4921 * @attr: device attribute, not used.
4922 * @buf: containing flag 1 for aer cleanup state.
4923 * @count: unused variable.
4924 *
4925 * Description:
4926 * If the @buf contains 1 and the device currently has the AER support
4927 * enabled, then invokes the kernel AER helper routine
4928 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4929 * error status register.
4930 *
4931 * Notes:
4932 *
4933 * Returns:
4934 * -EINVAL if the buf does not contain the 1 or the device is not currently
4935 * enabled with the AER support.
4936 **/
4937 static ssize_t
4938 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4939 const char *buf, size_t count)
4940 {
4941 struct Scsi_Host *shost = class_to_shost(dev);
4942 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4943 struct lpfc_hba *phba = vport->phba;
4944 int val, rc = -1;
4945
4946 if (!isdigit(buf[0]))
4947 return -EINVAL;
4948 if (sscanf(buf, "%i", &val) != 1)
4949 return -EINVAL;
4950 if (val != 1)
4951 return -EINVAL;
4952
4953 if (phba->hba_flag & HBA_AER_ENABLED)
4954 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4955
4956 if (rc == 0)
4957 return strlen(buf);
4958 else
4959 return -EPERM;
4960 }
4961
4962 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4963 lpfc_aer_cleanup_state);
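
/*
 * Illustrative usage (assumed path; the host number varies per system):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup
 * Only the value 1 is accepted; the write clears the non-fatal AER status
 * registers and fails with -EPERM when AER is not currently enabled.
 */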
4964
4965 /**
4966 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4967 *
4968 * @dev: class device that is converted into a Scsi_host.
4969 * @attr: device attribute, not used.
4970  * @buf: string containing the number of vfs to be enabled.
4971 * @count: unused variable.
4972 *
4973 * Description:
4974  * When this api is called through user sysfs, the driver shall
4975 * try to enable or disable SR-IOV virtual functions according to the
4976 * following:
4977 *
4978 * If zero virtual function has been enabled to the physical function,
4979 * the driver shall invoke the pci enable virtual function api trying
4980 * to enable the virtual functions. If the nr_vfn provided is greater
4981 * than the maximum supported, the maximum virtual function number will
4982 * be used for invoking the api; otherwise, the nr_vfn provided shall
4983 * be used for invoking the api. If the api call returned success, the
4984 * actual number of virtual functions enabled will be set to the driver
4985 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4986 * cfg_sriov_nr_virtfn remains zero.
4987 *
4988  * If nonzero virtual functions have already been enabled on the
4989  * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4990  * -EEXIST will be returned and the driver does nothing;
4991 *
4992  * If the nr_vfn provided is zero and nonzero virtual functions have
4993  * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4994  * disabling virtual function api shall be invoked to disable all the
4995 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4996 * zero. Otherwise, if zero virtual function has been enabled, do
4997 * nothing.
4998 *
4999 * Returns:
5000  * length of the buf on success if val is in range and the intended mode
5001  * is supported.
5002 * -EINVAL if val out of range or intended mode is not supported.
5003 **/
5004 static ssize_t
5005 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
5006 const char *buf, size_t count)
5007 {
5008 struct Scsi_Host *shost = class_to_shost(dev);
5009 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5010 struct lpfc_hba *phba = vport->phba;
5011 struct pci_dev *pdev = phba->pcidev;
5012 int val = 0, rc = -EINVAL;
5013
5014 /* Sanity check on user data */
5015 if (!isdigit(buf[0]))
5016 return -EINVAL;
5017 if (sscanf(buf, "%i", &val) != 1)
5018 return -EINVAL;
5019 if (val < 0)
5020 return -EINVAL;
5021
5022 /* Request disabling virtual functions */
5023 if (val == 0) {
5024 if (phba->cfg_sriov_nr_virtfn > 0) {
5025 pci_disable_sriov(pdev);
5026 phba->cfg_sriov_nr_virtfn = 0;
5027 }
5028 return strlen(buf);
5029 }
5030
5031 /* Request enabling virtual functions */
5032 if (phba->cfg_sriov_nr_virtfn > 0) {
5033 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5034 "3018 There are %d virtual functions "
5035 "enabled on physical function.\n",
5036 phba->cfg_sriov_nr_virtfn);
5037 return -EEXIST;
5038 }
5039
5040 if (val <= LPFC_MAX_VFN_PER_PFN)
5041 phba->cfg_sriov_nr_virtfn = val;
5042 else {
5043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5044 "3019 Enabling %d virtual functions is not "
5045 "allowed.\n", val);
5046 return -EINVAL;
5047 }
5048
5049 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
5050 if (rc) {
5051 phba->cfg_sriov_nr_virtfn = 0;
5052 rc = -EPERM;
5053 } else
5054 rc = strlen(buf);
5055
5056 return rc;
5057 }
5058
5059 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
5060 "Enable PCIe device SR-IOV virtual fn");
5061
5062 lpfc_param_show(sriov_nr_virtfn)
5063 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
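
/*
 * Illustrative usage (a sketch; the host number and VF count are assumptions
 * and must not exceed LPFC_MAX_VFN_PER_PFN):
 *   echo 4 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn   # enable 4 VFs
 *   echo 0 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn   # disable VFs
 * Enabling fails with -EEXIST if virtual functions are already enabled.
 */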
5064
5065 /**
5066 * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
5067 *
5068 * @dev: class device that is converted into a Scsi_host.
5069 * @attr: device attribute, not used.
5070  * @buf: string containing 1 to request a firmware upgrade.
5071 * @count: unused variable.
5072 *
5073  * Description:
5074  * Writing 1 requests a Linux generic firmware upgrade for the adapter.
5075 * Returns:
5076  * length of the buf on success if the firmware upgrade request was
5077  * accepted; -EPERM if the request failed.
5078 * -EINVAL if val out of range or intended mode is not supported.
5079 **/
5080 static ssize_t
5081 lpfc_request_firmware_upgrade_store(struct device *dev,
5082 struct device_attribute *attr,
5083 const char *buf, size_t count)
5084 {
5085 struct Scsi_Host *shost = class_to_shost(dev);
5086 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5087 struct lpfc_hba *phba = vport->phba;
5088 int val = 0, rc;
5089
5090 /* Sanity check on user data */
5091 if (!isdigit(buf[0]))
5092 return -EINVAL;
5093 if (sscanf(buf, "%i", &val) != 1)
5094 return -EINVAL;
5095 if (val != 1)
5096 return -EINVAL;
5097
5098 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
5099 if (rc)
5100 rc = -EPERM;
5101 else
5102 rc = strlen(buf);
5103 return rc;
5104 }
5105
5106 static int lpfc_req_fw_upgrade;
5107 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
5108 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
5109 lpfc_param_show(request_firmware_upgrade)
5110
5111 /**
5112 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
5113 * @phba: lpfc_hba pointer.
5114 * @val: 0 or 1.
5115 *
5116 * Description:
5117 * Set the initial Linux generic firmware upgrade enable or disable flag.
5118 *
5119 * Returns:
5120 * zero if val saved.
5121 * -EINVAL val out of range
5122 **/
5123 static int
5124 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
5125 {
5126 if (val >= 0 && val <= 1) {
5127 phba->cfg_request_firmware_upgrade = val;
5128 return 0;
5129 }
5130 return -EINVAL;
5131 }
5132 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
5133 lpfc_request_firmware_upgrade_show,
5134 lpfc_request_firmware_upgrade_store);
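
/*
 * Illustrative usage (assumed path): writing 1 requests a Linux generic
 * firmware upgrade through lpfc_sli4_request_firmware_update(); any other
 * value is rejected with -EINVAL:
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_req_fw_upgrade
 */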
5135
5136 /**
5137  * lpfc_force_rscn_store - Force an RSCN to be sent to all remote NPorts
5138 *
5139 * @dev: class device that is converted into a Scsi_host.
5140 * @attr: device attribute, not used.
5141 * @buf: unused string
5142 * @count: unused variable.
5143 *
5144 * Description:
5145 * Force the switch to send a RSCN to all other NPorts in our zone
5146  * If we are direct connect pt2pt, build the RSCN command ourselves
5147 * and send to the other NPort. Not supported for private loop.
5148 *
5149 * Returns:
5150  * length of the buf - on success
5151 * -EIO - if command is not sent
5152 **/
5153 static ssize_t
5154 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
5155 const char *buf, size_t count)
5156 {
5157 struct Scsi_Host *shost = class_to_shost(dev);
5158 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5159 int i;
5160
5161 i = lpfc_issue_els_rscn(vport, 0);
5162 if (i)
5163 return -EIO;
5164 return strlen(buf);
5165 }
5166
5167 /*
5168 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
5169 * connected to the HBA.
5170 *
5171 * Value range is any ascii value
5172 */
5173 static int lpfc_force_rscn;
5174 module_param(lpfc_force_rscn, int, 0644);
5175 MODULE_PARM_DESC(lpfc_force_rscn,
5176 "Force an RSCN to be sent to all remote NPorts");
5177 lpfc_param_show(force_rscn)
5178
5179 /**
5180 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
5181 * @phba: lpfc_hba pointer.
5182 * @val: unused value.
5183 *
5184 * Returns:
5185 * zero if val saved.
5186 **/
5187 static int
5188 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
5189 {
5190 return 0;
5191 }
5192 static DEVICE_ATTR_RW(lpfc_force_rscn);
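
/*
 * Illustrative usage (assumed path): the written value is not parsed; any
 * write simply triggers lpfc_issue_els_rscn() for the port:
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_force_rscn
 */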
5193
5194 /**
5195  * lpfc_fcp_imax_store - Set the maximum FCP interrupt rate for the adapter
5196 *
5197 * @dev: class device that is converted into a Scsi_host.
5198 * @attr: device attribute, not used.
5199 * @buf: string with the number of fast-path FCP interrupts per second.
5200 * @count: unused variable.
5201 *
5202 * Description:
5203  * If val is in a valid range [5000,5000000], then set the adapter's
5204 * maximum number of fast-path FCP interrupts per second.
5205 *
5206 * Returns:
5207  * length of the buf on success if val is in range and the intended mode
5208  * is supported.
5209 * -EINVAL if val out of range or intended mode is not supported.
5210 **/
5211 static ssize_t
5212 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5213 const char *buf, size_t count)
5214 {
5215 struct Scsi_Host *shost = class_to_shost(dev);
5216 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5217 struct lpfc_hba *phba = vport->phba;
5218 struct lpfc_eq_intr_info *eqi;
5219 uint32_t usdelay;
5220 int val = 0, i;
5221
5222 /* fcp_imax is only valid for SLI4 */
5223 if (phba->sli_rev != LPFC_SLI_REV4)
5224 return -EINVAL;
5225
5226 /* Sanity check on user data */
5227 if (!isdigit(buf[0]))
5228 return -EINVAL;
5229 if (sscanf(buf, "%i", &val) != 1)
5230 return -EINVAL;
5231
5232 /*
5233 * Value range for the HBA is [5000,5000000]
5234 * The value for each EQ depends on how many EQs are configured.
5235 * Allow value == 0
5236 */
5237 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5238 return -EINVAL;
5239
5240 phba->cfg_auto_imax = (val) ? 0 : 1;
5241 if (phba->cfg_fcp_imax && !val) {
5242 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5243 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5244
5245 for_each_present_cpu(i) {
5246 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5247 eqi->icnt = 0;
5248 }
5249 }
5250
5251 phba->cfg_fcp_imax = (uint32_t)val;
5252
5253 if (phba->cfg_fcp_imax)
5254 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5255 else
5256 usdelay = 0;
5257
5258 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5259 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5260 usdelay);
5261
5262 return strlen(buf);
5263 }
5264
5265 /*
5266 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5267 # for the HBA.
5268 #
5269 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5270 */
5271 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5272 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5273 MODULE_PARM_DESC(lpfc_fcp_imax,
5274 "Set the maximum number of FCP interrupts per second per HBA");
5275 lpfc_param_show(fcp_imax)
5276
5277 /**
5278  * lpfc_fcp_imax_init - Set the initial FCP interrupt rate limit
5279  * @phba: lpfc_hba pointer.
5280  * @val: maximum number of fast-path FCP interrupts per second.
5281  *
5282  * Description:
5283  * If val is 0 or within the valid range [5000,5000000], then initialize
5284  * the adapter's maximum number of fast-path FCP interrupts per second.
5285 *
5286 * Returns:
5287 * zero if val saved.
5288 * -EINVAL val out of range
5289 **/
5290 static int
5291 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5292 {
5293 if (phba->sli_rev != LPFC_SLI_REV4) {
5294 phba->cfg_fcp_imax = 0;
5295 return 0;
5296 }
5297
5298 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5299 (val == 0)) {
5300 phba->cfg_fcp_imax = val;
5301 return 0;
5302 }
5303
5304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5305 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5306 val);
5307 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5308
5309 return 0;
5310 }
5311
5312 static DEVICE_ATTR_RW(lpfc_fcp_imax);
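
/*
 * Worked example (a sketch; assumes LPFC_SEC_TO_USEC represents one second
 * in microseconds): writing 50000 interrupts per second yields an EQ delay
 * of 1000000 / 50000 = 20 usec, applied across all EQs. Writing 0 sets
 * cfg_auto_imax so the eq_delay_work handler tunes the delay instead:
 *   echo 50000 > /sys/class/scsi_host/host0/lpfc_fcp_imax
 */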
5313
5314 /**
5315  * lpfc_cq_max_proc_limit_store - Set the maximum CQE processing limit per CQ
5316 *
5317 * @dev: class device that is converted into a Scsi_host.
5318 * @attr: device attribute, not used.
5319 * @buf: string with the cq max processing limit of cqes
5320 * @count: unused variable.
5321 *
5322 * Description:
5323 * If val is in a valid range, then set value on each cq
5324 *
5325 * Returns:
5326 * The length of the buf: if successful
5327 * -ERANGE: if val is not in the valid range
5328 * -EINVAL: if bad value format or intended mode is not supported.
5329 **/
5330 static ssize_t
5331 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5332 const char *buf, size_t count)
5333 {
5334 struct Scsi_Host *shost = class_to_shost(dev);
5335 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5336 struct lpfc_hba *phba = vport->phba;
5337 struct lpfc_queue *eq, *cq;
5338 unsigned long val;
5339 int i;
5340
5341 /* cq_max_proc_limit is only valid for SLI4 */
5342 if (phba->sli_rev != LPFC_SLI_REV4)
5343 return -EINVAL;
5344
5345 /* Sanity check on user data */
5346 if (!isdigit(buf[0]))
5347 return -EINVAL;
5348 if (kstrtoul(buf, 0, &val))
5349 return -EINVAL;
5350
5351 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5352 return -ERANGE;
5353
5354 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5355
5356 /* set the values on the cq's */
5357 for (i = 0; i < phba->cfg_irq_chann; i++) {
5358 /* Get the EQ corresponding to the IRQ vector */
5359 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5360 if (!eq)
5361 continue;
5362
5363 list_for_each_entry(cq, &eq->child_list, list)
5364 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5365 cq->entry_count);
5366 }
5367
5368 return strlen(buf);
5369 }
5370
5371 /*
5372  * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
5373  * iteration of CQ processing.
5374 */
5375 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5376 module_param(lpfc_cq_max_proc_limit, int, 0644);
5377 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5378 "Set the maximum number CQEs processed in an iteration of "
5379 "CQ processing");
5380 lpfc_param_show(cq_max_proc_limit)
5381
5382 /*
5383 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5384 * single handler call which should request a polled completion rather
5385 * than re-enabling interrupts.
5386 */
5387 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5388 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5389 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5390 "CQE Processing Threshold to enable Polling");
5391
5392 /**
5393 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5394 * @phba: lpfc_hba pointer.
5395 * @val: entry limit
5396 *
5397 * Description:
5398 * If val is in a valid range, then initialize the adapter's maximum
5399 * value.
5400 *
5401 * Returns:
5402 * Always returns 0 for success, even if value not always set to
5403 * requested value. If value out of range or not supported, will fall
5404 * back to default.
5405 **/
5406 static int
5407 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5408 {
5409 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5410
5411 if (phba->sli_rev != LPFC_SLI_REV4)
5412 return 0;
5413
5414 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5415 phba->cfg_cq_max_proc_limit = val;
5416 return 0;
5417 }
5418
5419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5420 "0371 lpfc_cq_max_proc_limit: %d out of range, using "
5421 "default\n",
5422 phba->cfg_cq_max_proc_limit);
5423
5424 return 0;
5425 }
5426
5427 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
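
/*
 * Illustrative usage (assumed path; the value shown is an assumption and
 * must lie within [LPFC_CQ_MIN_PROC_LIMIT, LPFC_CQ_MAX_PROC_LIMIT]):
 *   echo 256 > /sys/class/scsi_host/host0/lpfc_cq_max_proc_limit
 * The new limit is applied to every child CQ of each EQ, clamped to that
 * CQ's entry_count; out-of-range values are rejected with -ERANGE.
 */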
5428
5429 /**
5430 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
5431 * @dev: class converted to a Scsi_host structure.
5432 * @attr: device attribute, not used.
5433  * @buf: on return contains text describing the CPU to EQ/IRQ mapping.
5434 *
5435 * Returns: size of formatted string.
5436 **/
5437 static ssize_t
5438 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5439 char *buf)
5440 {
5441 struct Scsi_Host *shost = class_to_shost(dev);
5442 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5443 struct lpfc_hba *phba = vport->phba;
5444 struct lpfc_vector_map_info *cpup;
5445 int len = 0;
5446
5447 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5448 (phba->intr_type != MSIX))
5449 return len;
5450
5451 switch (phba->cfg_fcp_cpu_map) {
5452 case 0:
5453 len += scnprintf(buf + len, PAGE_SIZE-len,
5454 "fcp_cpu_map: No mapping (%d)\n",
5455 phba->cfg_fcp_cpu_map);
5456 return len;
5457 case 1:
5458 len += scnprintf(buf + len, PAGE_SIZE-len,
5459 "fcp_cpu_map: HBA centric mapping (%d): "
5460 "%d of %d CPUs online from %d possible CPUs\n",
5461 phba->cfg_fcp_cpu_map, num_online_cpus(),
5462 num_present_cpus(),
5463 phba->sli4_hba.num_possible_cpu);
5464 break;
5465 }
5466
5467 while (phba->sli4_hba.curr_disp_cpu <
5468 phba->sli4_hba.num_possible_cpu) {
5469 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5470
5471 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5472 len += scnprintf(buf + len, PAGE_SIZE - len,
5473 "CPU %02d not present\n",
5474 phba->sli4_hba.curr_disp_cpu);
5475 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5476 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5477 len += scnprintf(
5478 buf + len, PAGE_SIZE - len,
5479 "CPU %02d hdwq None "
5480 "physid %d coreid %d ht %d ua %d\n",
5481 phba->sli4_hba.curr_disp_cpu,
5482 cpup->phys_id, cpup->core_id,
5483 (cpup->flag & LPFC_CPU_MAP_HYPER),
5484 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5485 else
5486 len += scnprintf(
5487 buf + len, PAGE_SIZE - len,
5488 "CPU %02d EQ None hdwq %04d "
5489 "physid %d coreid %d ht %d ua %d\n",
5490 phba->sli4_hba.curr_disp_cpu,
5491 cpup->hdwq, cpup->phys_id,
5492 cpup->core_id,
5493 (cpup->flag & LPFC_CPU_MAP_HYPER),
5494 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5495 } else {
5496 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5497 len += scnprintf(
5498 buf + len, PAGE_SIZE - len,
5499 "CPU %02d hdwq None "
5500 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5501 phba->sli4_hba.curr_disp_cpu,
5502 cpup->phys_id,
5503 cpup->core_id,
5504 (cpup->flag & LPFC_CPU_MAP_HYPER),
5505 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5506 lpfc_get_irq(cpup->eq));
5507 else
5508 len += scnprintf(
5509 buf + len, PAGE_SIZE - len,
5510 "CPU %02d EQ %04d hdwq %04d "
5511 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5512 phba->sli4_hba.curr_disp_cpu,
5513 cpup->eq, cpup->hdwq, cpup->phys_id,
5514 cpup->core_id,
5515 (cpup->flag & LPFC_CPU_MAP_HYPER),
5516 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5517 lpfc_get_irq(cpup->eq));
5518 }
5519
5520 phba->sli4_hba.curr_disp_cpu++;
5521
5522 /* display max number of CPUs keeping some margin */
5523 if (phba->sli4_hba.curr_disp_cpu <
5524 phba->sli4_hba.num_possible_cpu &&
5525 (len >= (PAGE_SIZE - 64))) {
5526 len += scnprintf(buf + len,
5527 PAGE_SIZE - len, "more...\n");
5528 break;
5529 }
5530 }
5531
5532 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5533 phba->sli4_hba.curr_disp_cpu = 0;
5534
5535 return len;
5536 }
5537
5538 /**
5539 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5540 * @dev: class device that is converted into a Scsi_host.
5541 * @attr: device attribute, not used.
5542  * @buf: unused string.
5543 * @count: not used.
5544 *
5545 * Returns:
5546 * -EINVAL - Not implemented yet.
5547 **/
5548 static ssize_t
5549 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5550 const char *buf, size_t count)
5551 {
5552 return -EINVAL;
5553 }
5554
5555 /*
5556 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5557 # for the HBA.
5558 #
5559 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5560 #       0 - Do not affinitize IRQ vectors
5561 #       1 - Affinitize HBA vectors with respect to each HBA
5562 # (start with CPU0 for each HBA)
5563 # This also defines how Hardware Queues are mapped to specific CPUs.
5564 */
5565 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5566 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5567 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5568 "Defines how to map CPUs to IRQ vectors per HBA");
5569
5570 /**
5571  * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
5572  * @phba: lpfc_hba pointer.
5573  * @val: CPU mapping mode value.
5574  *
5575  * Description:
5576  * If val is in a valid range [0-1], then affinitize the adapter's
5577  * MSIX vectors.
5578 *
5579 * Returns:
5580 * zero if val saved.
5581 * -EINVAL val out of range
5582 **/
5583 static int
5584 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5585 {
5586 if (phba->sli_rev != LPFC_SLI_REV4) {
5587 phba->cfg_fcp_cpu_map = 0;
5588 return 0;
5589 }
5590
5591 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5592 phba->cfg_fcp_cpu_map = val;
5593 return 0;
5594 }
5595
5596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5597 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5598 "default\n", val);
5599 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5600
5601 return 0;
5602 }
5603
5604 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
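
/*
 * Illustrative usage (assumed path): the mapping is effectively read-only at
 * runtime; reads dump the per-CPU EQ/hdwq/IRQ assignment and writes return
 * -EINVAL:
 *   cat /sys/class/scsi_host/host0/lpfc_fcp_cpu_map
 * Example output line (format only; the values are illustrative):
 *   CPU 00 EQ 0000 hdwq 0000 physid 0 coreid 0 ht 0 ua 0 IRQ 45
 */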
5605
5606 /*
5607 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5608 # Value range is [2,3]. Default value is 3.
5609 */
5610 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5611 "Select Fibre Channel class of service for FCP sequences");
5612
5613 /*
5614 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5615 # is [0,1]. Default value is 1.
5616 */
5617 LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
5618 "Use ADISC on rediscovery to authenticate FCP devices");
5619
5620 /*
5621 # lpfc_first_burst_size: First burst size to use on the NPorts
5622 # that support first burst.
5623 # Value range is [0,65536]. Default value is 0.
5624 */
5625 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5626 "First burst size for Targets that support first burst");
5627
5628 /*
5629 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5630 * When the driver is configured as an NVME target, this value is
5631 * communicated to the NVME initiator in the PRLI response. It is
5632 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5633 * parameters are set and the target is sending the PRLI RSP.
5634 * Parameter supported on physical port only - no NPIV support.
5635 * Value range is [0,65536]. Default value is 0.
5636 */
5637 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5638 "NVME Target mode first burst size in 512B increments.");
5639
5640 /*
5641 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5642 * For the Initiator (I), enabling this parameter means that an NVMET
5643 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5644 * processed by the initiator for subsequent NVME FCP IO.
5645 * Currently, this feature is not supported on the NVME target
5646 * Value range is [0,1]. Default value is 0 (disabled).
5647 */
5648 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5649 "Enable First Burst feature for NVME Initiator.");
5650
5651 /*
5652 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5653 # depth. Default value is 0. When the value of this parameter is zero the
5654 # SCSI command completion time is not used for controlling I/O queue depth. When
5655 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5656 # to limit the I/O completion time to the parameter value.
5657 # The value is set in milliseconds.
5658 */
5659 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5660 "Use command completion time to control queue depth");
5661
5662 lpfc_vport_param_show(max_scsicmpl_time);
5663 static int
5664 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5665 {
5666 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5667 struct lpfc_nodelist *ndlp, *next_ndlp;
5668
5669 if (val == vport->cfg_max_scsicmpl_time)
5670 return 0;
5671 if ((val < 0) || (val > 60000))
5672 return -EINVAL;
5673 vport->cfg_max_scsicmpl_time = val;
5674
5675 spin_lock_irq(shost->host_lock);
5676 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5677 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5678 continue;
5679 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5680 }
5681 spin_unlock_irq(shost->host_lock);
5682 return 0;
5683 }
5684 lpfc_vport_param_store(max_scsicmpl_time);
5685 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5686
5687 /*
5688 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5689 # range is [0,1]. Default value is 0.
5690 */
5691 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5692
5693 /*
5694 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5695 # range is [0,1]. Default value is 1.
5696 */
5697 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5698
5699 /*
5700  * lpfc_io_sched: Determine scheduling algorithm for issuing FCP cmds
5701 * range is [0,1]. Default value is 0.
5702 * For [0], FCP commands are issued to Work Queues based on upper layer
5703 * hardware queue index.
5704 * For [1], FCP commands are issued to a Work Queue associated with the
5705 * current CPU.
5706 *
5707 * LPFC_FCP_SCHED_BY_HDWQ == 0
5708 * LPFC_FCP_SCHED_BY_CPU == 1
5709 *
5710 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5711 * affinity for FCP/NVME I/Os through Work Queues associated with the current
5712 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5713 * through WQs will be used.
5714 */
5715 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5716 LPFC_FCP_SCHED_BY_HDWQ,
5717 LPFC_FCP_SCHED_BY_CPU,
5718 "Determine scheduling algorithm for "
5719 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5720
5721 /*
5722  * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5723 * range is [0,1]. Default value is 0.
5724 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5725 * For [1], GID_PT is used for NameServer queries after RSCN
5726 *
5727 */
5728 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5729 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5730 "Determine algorithm NameServer queries after RSCN "
5731 "[0] - GID_FT, [1] - GID_PT");
5732
5733 /*
5734 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5735 # range is [0,1]. Default value is 0.
5736 # For [0], bus reset issues target reset to ALL devices
5737 # For [1], bus reset issues target reset to non-FCP2 devices
5738 */
5739 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5740 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5741
5742
5743 /*
5744 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5745 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5746 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5747 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5748 # cr_delay is set to 0.
5749 */
5750 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5751 "interrupt response is generated");
5752
5753 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5754 "interrupt response is generated");
5755
5756 /*
5757 # lpfc_multi_ring_support: Determines how many rings to spread available
5758 # cmd/rsp IOCB entries across.
5759 # Value range is [1,2]. Default value is 1.
5760 */
5761 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5762 "SLI rings to spread IOCB entries across");
5763
5764 /*
5765 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5766 # identifies what rctl value to configure the additional ring for.
5767 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5768 */
5769 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5770 255, "Identifies RCTL for additional ring configuration");
5771
5772 /*
5773 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5774 # identifies what type value to configure the additional ring for.
5775 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5776 */
5777 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5778 255, "Identifies TYPE for additional ring configuration");
5779
5780 /*
5781 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5782 # 0 = SmartSAN functionality disabled (default)
5783 # 1 = SmartSAN functionality enabled
5784 # This parameter will override the value of lpfc_fdmi_on module parameter.
5785 # Value range is [0,1]. Default value is 0.
5786 */
5787 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5788
5789 /*
5790 # lpfc_fdmi_on: Controls FDMI support.
5791 # 0 No FDMI support
5792 # 1 Traditional FDMI support (default)
5793 # Traditional FDMI support means the driver will assume FDMI-2 support;
5794 # however, if that fails, it will fallback to FDMI-1.
5795 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5796 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5797 # lpfc_fdmi_on.
5798 # Value range [0,1]. Default value is 1.
5799 */
5800 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5801
5802 /*
5803 # Specifies the maximum number of ELS cmds we can have outstanding (for
5804 # discovery). Value range is [1,64]. Default value = 32.
5805 */
5806 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5807 "during discovery");
5808
5809 /*
5810 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5811 # will be scanned by the SCSI midlayer when sequential scanning is
5812 # used; and is also the highest LUN ID allowed when the SCSI midlayer
5813 # parses REPORT_LUN responses. The lpfc driver has no LUN count or
5814 # LUN ID limit, but the SCSI midlayer requires this field for the uses
5815 # above. The lpfc driver limits the default value to 255 for two reasons.
5816 # As it bounds the sequential scan loop, scanning for thousands of luns
5817 # on a target can take minutes of wall clock time. Additionally,
5818 # there are FC targets, such as JBODs, that only recognize 8-bits of
5819 # LUN ID. When they receive a value greater than 8 bits, they chop off
5820 # the high order bits. In other words, they see LUN IDs 0, 256, 512,
5821 # and so on all as LUN ID 0. This causes the linux kernel, which sees
5822 # valid responses at each of the LUN IDs, to believe there are multiple
5823 # devices present, when in fact, there is only 1.
5824 # A customer that is aware of their target behaviors, and the results as
5825 # indicated above, is welcome to increase the lpfc_max_luns value.
5826 # As mentioned, this value is not used by the lpfc driver, only the
5827 # SCSI midlayer.
5828 # Value range is [0,65535]. Default value is 255.
5829 # NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
5830 */
5831 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5832
5833 /*
5834 # lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
5835 # Value range is [1,255], default value is 10.
5836 */
5837 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5838 "Milliseconds driver will wait between polling FCP ring");
5839
5840 /*
5841 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5842 # to complete in seconds. Value range is [5,180], default value is 60.
5843 */
5844 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5845 "Maximum time to wait for task management commands to complete");
5846 /*
5847 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5848 # support this feature
5849 # 0 = MSI disabled
5850 # 1 = MSI enabled
5851 # 2 = MSI-X enabled (default)
5852 # Value range is [0,2]. Default value is 2.
5853 */
5854 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5855 "MSI-X (2), if possible");
5856
5857 /*
5858 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5859 *
5860 * 0 = NVME OAS disabled
5861 * 1 = NVME OAS enabled
5862 *
5863 * Value range is [0,1]. Default value is 0.
5864 */
5865 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5866 "Use OAS bit on NVME IOs");
5867
5868 /*
5869  * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending NVME/NVMET IOs
5870 *
5871 * 0 = Put NVME Command in SGL
5872 * 1 = Embed NVME Command in WQE (unless G7)
5873 * 2 = Embed NVME Command in WQE (force)
5874 *
5875 * Value range is [0,2]. Default value is 1.
5876 */
5877 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5878 "Embed NVME Command in WQE");
5879
5880 /*
5881 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5882 * the driver will advertise it supports to the SCSI layer.
5883 *
5884 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5885  * 1,256 = Manually specify nr_hw_queue value to be advertised.
5886 *
5887 * Value range is [0,256]. Default value is 8.
5888 */
5889 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5890 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5891 "Set the number of SCSI Queues advertised");
5892
5893 /*
5894 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5895 * will advertise it supports to the NVME and SCSI layers. This also
5896 * will map to the number of CQ/WQ pairs the driver will create.
5897 *
5898 * The NVME Layer will try to create this many, plus 1 administrative
5899 * hardware queue. The administrative queue will always map to WQ 0
5900 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5901 *
5902 * 0 = Configure the number of hdw queues to the number of active CPUs.
5903 * 1,256 = Manually specify how many hdw queues to use.
5904 *
5905 * Value range is [0,256]. Default value is 0.
5906 */
5907 LPFC_ATTR_R(hdw_queue,
5908 LPFC_HBA_HDWQ_DEF,
5909 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5910 "Set the number of I/O Hardware Queues");
5911
5912 #if IS_ENABLED(CONFIG_X86)
5913 /**
5914  * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5915 * irq_chann_mode
5916 * @phba: Pointer to HBA context object.
5917 **/
5918 static void
5919 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5920 {
5921 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5922 const struct cpumask *sibling_mask;
5923 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5924
5925 cpumask_clear(aff_mask);
5926
5927 if (phba->irq_chann_mode == NUMA_MODE) {
5928 /* Check if we're a NUMA architecture */
5929 numa_node = dev_to_node(&phba->pcidev->dev);
5930 if (numa_node == NUMA_NO_NODE) {
5931 phba->irq_chann_mode = NORMAL_MODE;
5932 return;
5933 }
5934 }
5935
5936 for_each_possible_cpu(cpu) {
5937 switch (phba->irq_chann_mode) {
5938 case NUMA_MODE:
5939 if (cpu_to_node(cpu) == numa_node)
5940 cpumask_set_cpu(cpu, aff_mask);
5941 break;
5942 case NHT_MODE:
5943 sibling_mask = topology_sibling_cpumask(cpu);
5944 first_cpu = cpumask_first(sibling_mask);
5945 if (first_cpu < nr_cpu_ids)
5946 cpumask_set_cpu(first_cpu, aff_mask);
5947 break;
5948 default:
5949 break;
5950 }
5951 }
5952 }
5953 #endif
5954
5955 static void
5956 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5957 {
5958 #if IS_ENABLED(CONFIG_X86)
5959 switch (boot_cpu_data.x86_vendor) {
5960 case X86_VENDOR_AMD:
5961 /* If AMD architecture, then default is NUMA_MODE */
5962 phba->irq_chann_mode = NUMA_MODE;
5963 break;
5964 case X86_VENDOR_INTEL:
5965 /* If Intel architecture, then default is no hyperthread mode */
5966 phba->irq_chann_mode = NHT_MODE;
5967 break;
5968 default:
5969 phba->irq_chann_mode = NORMAL_MODE;
5970 break;
5971 }
5972 lpfc_cpumask_irq_mode_init(phba);
5973 #else
5974 phba->irq_chann_mode = NORMAL_MODE;
5975 #endif
5976 }
5977
5978 /*
5979 * lpfc_irq_chann: Set the number of IRQ vectors that are available
5980 * for Hardware Queues to utilize. This also will map to the number
5981 * of EQ / MSI-X vectors the driver will create. This should never be
5982 * more than the number of Hardware Queues
5983 *
5984 * 0 = Configure number of IRQ Channels to:
5985 * if AMD architecture, number of CPUs on HBA's NUMA node
5986 * if Intel architecture, number of physical CPUs.
5987 * otherwise, number of active CPUs.
5988 * [1,256] = Manually specify how many IRQ Channels to use.
5989 *
5990 * Value range is [0,256]. Default value is [0].
5991 */
5992 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5993 module_param(lpfc_irq_chann, uint, 0444);
5994 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
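
/*
 * Illustrative module load (a sketch; the vector count is an assumption and
 * must not exceed LPFC_IRQ_CHANN_MAX):
 *   modprobe lpfc lpfc_irq_chann=16
 * Leaving the parameter at 0 lets the driver choose a count based on the
 * CPU topology, as described above.
 */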
5995
5996 /* lpfc_irq_chann_init - Set the hba irq_chann initial value
5997 * @phba: lpfc_hba pointer.
5998 * @val: contains the initial value
5999 *
6000 * Description:
6001 * Validates the initial value is within range and assigns it to the
6002 * adapter. If not in range, an error message is posted and the
6003 * default value is assigned.
6004 *
6005 * Returns:
6006 * zero if value is in range and is set
6007 * -EINVAL if value was out of range
6008 **/
6009 static int
6010 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
6011 {
6012 const struct cpumask *aff_mask;
6013
6014 if (phba->cfg_use_msi != 2) {
6015 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6016 "8532 use_msi = %u ignoring cfg_irq_numa\n",
6017 phba->cfg_use_msi);
6018 phba->irq_chann_mode = NORMAL_MODE;
6019 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
6020 return 0;
6021 }
6022
6023 /* Check if default setting was passed */
6024 if (val == LPFC_IRQ_CHANN_DEF &&
6025 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
6026 phba->sli_rev == LPFC_SLI_REV4)
6027 lpfc_assign_default_irq_chann(phba);
6028
6029 if (phba->irq_chann_mode != NORMAL_MODE) {
6030 aff_mask = &phba->sli4_hba.irq_aff_mask;
6031
6032 if (cpumask_empty(aff_mask)) {
6033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6034 "8533 Could not identify CPUS for "
6035 "mode %d, ignoring\n",
6036 phba->irq_chann_mode);
6037 phba->irq_chann_mode = NORMAL_MODE;
6038 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
6039 } else {
6040 phba->cfg_irq_chann = cpumask_weight(aff_mask);
6041
6042 /* If no hyperthread mode, then set hdwq count to
6043 * aff_mask weight as well
6044 */
6045 if (phba->irq_chann_mode == NHT_MODE)
6046 phba->cfg_hdw_queue = phba->cfg_irq_chann;
6047
6048 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6049 "8543 lpfc_irq_chann set to %u "
6050 "(mode: %d)\n", phba->cfg_irq_chann,
6051 phba->irq_chann_mode);
6052 }
6053 } else {
6054 if (val > LPFC_IRQ_CHANN_MAX) {
6055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6056 "8545 lpfc_irq_chann attribute cannot "
6057 "be set to %u, allowed range is "
6058 "[%u,%u]\n",
6059 val,
6060 LPFC_IRQ_CHANN_MIN,
6061 LPFC_IRQ_CHANN_MAX);
6062 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
6063 return -EINVAL;
6064 }
6065 if (phba->sli_rev == LPFC_SLI_REV4) {
6066 phba->cfg_irq_chann = val;
6067 } else {
6068 phba->cfg_irq_chann = 2;
6069 phba->cfg_hdw_queue = 1;
6070 }
6071 }
6072
6073 return 0;
6074 }
6075
6076 /**
6077 * lpfc_irq_chann_show - Display value of irq_chann
6078 * @dev: class converted to a Scsi_host structure.
6079 * @attr: device attribute, not used.
6080 * @buf: on return contains a string with the list sizes
6081 *
6082 * Returns: size of formatted string.
6083 **/
6084 static ssize_t
6085 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
6086 char *buf)
6087 {
6088 struct Scsi_Host *shost = class_to_shost(dev);
6089 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6090 struct lpfc_hba *phba = vport->phba;
6091
6092 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
6093 }
6094
6095 static DEVICE_ATTR_RO(lpfc_irq_chann);
6096
6097 /*
6098 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
6099 # 0 = HBA resets disabled
6100 # 1 = HBA resets enabled (default)
6101 # 2 = HBA reset via PCI bus reset enabled
6102 # Value range is [0,2]. Default value is 1.
6103 */
6104 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
6105
6106 /*
6107 # lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
6108 #       0 = HBA Heartbeat disabled (default)
6109 #       1 = HBA Heartbeat enabled
6110 # Value range is [0,1]. Default value is 0.
6111 */
6112 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
6113
6114 /*
6115 # lpfc_EnableXLane: Enable Express Lane Feature
6116 # 0x0 Express Lane Feature disabled
6117 # 0x1 Express Lane Feature enabled
6118 # Value range is [0,1]. Default value is 0.
6119 */
6120 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
6121
6122 /*
6123 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
6124 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
6125 # Value range is [0x0,0x7f]. Default value is 0
6126 */
6127 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
6128
6129 /*
6130 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
6131 # 0 = BlockGuard disabled (default)
6132 # 1 = BlockGuard enabled
6133 # Value range is [0,1]. Default value is 0.
6134 */
6135 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
6136
6137 /*
6138 # lpfc_prot_mask:
6139 # - Bit mask of host protection capabilities used to register with the
6140 # SCSI mid-layer
6141 # - Only meaningful if BG is turned on (lpfc_enable_bg=1).
6142 # - Allows you to ultimately specify which profiles to use
6143 # - Default will result in registering capabilities for all profiles.
6144 # - SHOST_DIF_TYPE1_PROTECTION 1
6145 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
6146 # - SHOST_DIX_TYPE0_PROTECTION 8
6147 # HBA supports DIX Type 0: Host to HBA protection only
6148 # - SHOST_DIX_TYPE1_PROTECTION 16
6149 # HBA supports DIX Type 1: Host to HBA Type 1 protection
6150 #
6151 */
6152 LPFC_ATTR(prot_mask,
6153 (SHOST_DIF_TYPE1_PROTECTION |
6154 SHOST_DIX_TYPE0_PROTECTION |
6155 SHOST_DIX_TYPE1_PROTECTION),
6156 0,
6157 (SHOST_DIF_TYPE1_PROTECTION |
6158 SHOST_DIX_TYPE0_PROTECTION |
6159 SHOST_DIX_TYPE1_PROTECTION),
6160 "T10-DIF host protection capabilities mask");
6161
6162 /*
6163 # lpfc_prot_guard:
6164 # - Bit mask of protection guard types to register with the SCSI mid-layer
6165 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
6166 # - Allows you to ultimately specify which profiles to use
6167 # - Default will result in registering capabilities for all guard types
6168 #
6169 */
6170 LPFC_ATTR(prot_guard,
6171 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
6172 "T10-DIF host protection guard type");
6173
6174 /*
6175 * Delay initial NPort discovery when Clean Address bit is cleared in
6176 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
6177 * This parameter can have value 0 or 1.
6178 * When this parameter is set to 0, no delay is added to the initial
6179 * discovery.
6180  * When this parameter is set to a non-zero value, initial Nport discovery is
6181  * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
6182  * accept and FCID/Fabric name/Fabric portname is changed.
6183  * The driver always delays Nport discovery for subsequent FLOGI/FDISC completion
6184 * when Clean Address bit is cleared in FLOGI/FDISC
6185 * accept and FCID/Fabric name/Fabric portname is changed.
6186 * Default value is 0.
6187 */
6188 LPFC_ATTR(delay_discovery, 0, 0, 1,
6189 "Delay NPort discovery when Clean Address bit is cleared.");
6190
6191 /*
6192 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
6193 * This value can be set to values between 64 and 4096. The default value
6194 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
6195 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
6196 * Because of the additional overhead involved in setting up T10-DIF,
6197 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
6198 * and will be limited to 512 if BlockGuard is enabled under SLI3.
6199 */
6200 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6201 module_param(lpfc_sg_seg_cnt, uint, 0444);
6202 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
6203
6204 /**
6205 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
6206 * configured for the adapter
6207 * @dev: class converted to a Scsi_host structure.
6208 * @attr: device attribute, not used.
6209 * @buf: on return contains a string with the list sizes
6210 *
6211 * Returns: size of formatted string.
6212 **/
6213 static ssize_t
6214 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6215 char *buf)
6216 {
6217 struct Scsi_Host *shost = class_to_shost(dev);
6218 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6219 struct lpfc_hba *phba = vport->phba;
6220 int len;
6221
6222 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
6223 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6224
6225 len += scnprintf(buf + len, PAGE_SIZE - len,
6226 "Cfg: %d SCSI: %d NVME: %d\n",
6227 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6228 phba->cfg_nvme_seg_cnt);
6229 return len;
6230 }
6231
6232 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
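
/*
 * Illustrative read (assumed path); the two lines follow the format strings
 * above and the numbers shown are only examples:
 *   $ cat /sys/class/scsi_host/host0/lpfc_sg_seg_cnt
 *   SGL sz: 4096 total SGEs: 256
 *   Cfg: 64 SCSI: 64 NVME: 64
 */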
6233
6234 /**
6235 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6236 * @phba: lpfc_hba pointer.
6237 * @val: contains the initial value
6238 *
6239 * Description:
6240 * Validates the initial value is within range and assigns it to the
6241 * adapter. If not in range, an error message is posted and the
6242 * default value is assigned.
6243 *
6244 * Returns:
6245 * zero if value is in range and is set
6246 * -EINVAL if value was out of range
6247 **/
6248 static int
6249 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6250 {
6251 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6252 phba->cfg_sg_seg_cnt = val;
6253 return 0;
6254 }
6255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6256 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
6257 "allowed range is [%d, %d]\n",
6258 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6259 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6260 return -EINVAL;
6261 }
6262
6263 /*
6264 * lpfc_enable_mds_diags: Enable MDS Diagnostics
6265 * 0 = MDS Diagnostics disabled (default)
6266 * 1 = MDS Diagnostics enabled
6267 * Value range is [0,1]. Default value is 0.
6268 */
6269 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6270
6271 /*
6272 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6273 * 0 = Disable firmware logging (default)
6274 * [1-4] = Multiple of 1/4th Mb of host memory for FW logging
6275 * Value range [0..4]. Default value is 0
6276 */
6277 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6278 lpfc_param_show(ras_fwlog_buffsize);
6279
6280 static ssize_t
6281 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6282 {
6283 int ret = 0;
6284 enum ras_state state;
6285
6286 if (!lpfc_rangecheck(val, 0, 4))
6287 return -EINVAL;
6288
6289 if (phba->cfg_ras_fwlog_buffsize == val)
6290 return 0;
6291
6292 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6293 return -EINVAL;
6294
6295 spin_lock_irq(&phba->hbalock);
6296 state = phba->ras_fwlog.state;
6297 spin_unlock_irq(&phba->hbalock);
6298
6299 if (state == REG_INPROGRESS) {
6300 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6301 "registration is in progress\n");
6302 return -EBUSY;
6303 }
6304
6305 /* For disable logging: stop the logs and free the DMA.
6306 * For ras_fwlog_buffsize size change we still need to free and
6307 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6308 */
6309 phba->cfg_ras_fwlog_buffsize = val;
6310 if (state == ACTIVE) {
6311 lpfc_ras_stop_fwlog(phba);
6312 lpfc_sli4_ras_dma_free(phba);
6313 }
6314
6315 lpfc_sli4_ras_init(phba);
6316 if (phba->ras_fwlog.ras_enabled)
6317 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6318 LPFC_RAS_ENABLE_LOGGING);
6319 return ret;
6320 }
6321
6322 lpfc_param_store(ras_fwlog_buffsize);
6323 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
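
/*
 * Illustrative usage (assumed path): each unit is a quarter megabyte of host
 * memory, so writing 2 reserves roughly 512 KB for firmware logging, while
 * writing 0 stops logging and frees the DMA buffer:
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_ras_fwlog_buffsize
 */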
6324
6325 /*
6326 * lpfc_ras_fwlog_level: Firmware logging verbosity level
6327 * Valid only if firmware logging is enabled
6328  * 0 (least verbosity) to 4 (most verbosity)
6329 * Value range is [0..4]. Default value is 0
6330 */
6331 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6332
6333 /*
6334 * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6335 * Default function which has RAS support : 0
6336 * Value Range is [0..7].
6337 * FW logging is a global action and enablement is via a specific
6338 * port.
6339 */
6340 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6341
6342 /*
6343 * lpfc_enable_bbcr: Enable BB Credit Recovery
6344 * 0 = BB Credit Recovery disabled
6345 * 1 = BB Credit Recovery enabled (default)
6346 * Value range is [0,1]. Default value is 1.
6347 */
6348 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6349
6350 /* Signaling module parameters */
6351 int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
6352 module_param(lpfc_fabric_cgn_frequency, int, 0444);
6353 MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
6354
6355 int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
6356 module_param(lpfc_acqe_cgn_frequency, int, 0444);
6357 MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
6358
6359 int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
6360 module_param(lpfc_use_cgn_signal, int, 0444);
6361 MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
6362
6363 /*
6364 * lpfc_enable_dpp: Enable DPP on G7
6365 * 0 = DPP on G7 disabled
6366 * 1 = DPP on G7 enabled (default)
6367 * Value range is [0,1]. Default value is 1.
6368 */
6369 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6370
6371 /*
6372 * lpfc_enable_mi: Enable FDMI MIB
6373 * 0 = disabled
6374 * 1 = enabled (default)
6375 * Value range is [0,1].
6376 */
6377 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
6378
6379 /*
6380 * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
6381 * either vmid_app_header or vmid_priority_tagging is enabled.
6382 * 4 - 255 = vmid support enabled for 4-255 VMs
6383 * Value range is [4,255].
6384 */
6385 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
6386 "Maximum number of VMs supported");
6387
6388 /*
6389 * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
6390 * 0 = Timeout is disabled
6391 * Value range is [0,24].
6392 */
6393 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
6394 "Inactivity timeout in hours");
6395
6396 /*
6397 * lpfc_vmid_app_header: Enable App Header VMID support
6398 * 0 = Support is disabled (default)
6399 * 1 = Support is enabled
6400 * Value range is [0,1].
6401 */
6402 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
6403 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
6404 "Enable App Header VMID support");
6405
6406 /*
6407 * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
6408 * 0 = Support is disabled (default)
6409 * 1 = Allow supported targets only
6410 * 2 = Allow all targets
6411 * Value range is [0,2].
6412 */
6413 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
6414 LPFC_VMID_PRIO_TAG_DISABLE,
6415 LPFC_VMID_PRIO_TAG_ALL_TARGETS,
6416 "Enable Priority Tagging VMID support");
6417
6418 struct device_attribute *lpfc_hba_attrs[] = {
6419 &dev_attr_nvme_info,
6420 &dev_attr_scsi_stat,
6421 &dev_attr_bg_info,
6422 &dev_attr_bg_guard_err,
6423 &dev_attr_bg_apptag_err,
6424 &dev_attr_bg_reftag_err,
6425 &dev_attr_info,
6426 &dev_attr_serialnum,
6427 &dev_attr_modeldesc,
6428 &dev_attr_modelname,
6429 &dev_attr_programtype,
6430 &dev_attr_portnum,
6431 &dev_attr_fwrev,
6432 &dev_attr_hdw,
6433 &dev_attr_option_rom_version,
6434 &dev_attr_link_state,
6435 &dev_attr_num_discovered_ports,
6436 &dev_attr_menlo_mgmt_mode,
6437 &dev_attr_lpfc_drvr_version,
6438 &dev_attr_lpfc_enable_fip,
6439 &dev_attr_lpfc_temp_sensor,
6440 &dev_attr_lpfc_log_verbose,
6441 &dev_attr_lpfc_lun_queue_depth,
6442 &dev_attr_lpfc_tgt_queue_depth,
6443 &dev_attr_lpfc_hba_queue_depth,
6444 &dev_attr_lpfc_peer_port_login,
6445 &dev_attr_lpfc_nodev_tmo,
6446 &dev_attr_lpfc_devloss_tmo,
6447 &dev_attr_lpfc_enable_fc4_type,
6448 &dev_attr_lpfc_fcp_class,
6449 &dev_attr_lpfc_use_adisc,
6450 &dev_attr_lpfc_first_burst_size,
6451 &dev_attr_lpfc_ack0,
6452 &dev_attr_lpfc_xri_rebalancing,
6453 &dev_attr_lpfc_topology,
6454 &dev_attr_lpfc_scan_down,
6455 &dev_attr_lpfc_link_speed,
6456 &dev_attr_lpfc_fcp_io_sched,
6457 &dev_attr_lpfc_ns_query,
6458 &dev_attr_lpfc_fcp2_no_tgt_reset,
6459 &dev_attr_lpfc_cr_delay,
6460 &dev_attr_lpfc_cr_count,
6461 &dev_attr_lpfc_multi_ring_support,
6462 &dev_attr_lpfc_multi_ring_rctl,
6463 &dev_attr_lpfc_multi_ring_type,
6464 &dev_attr_lpfc_fdmi_on,
6465 &dev_attr_lpfc_enable_SmartSAN,
6466 &dev_attr_lpfc_max_luns,
6467 &dev_attr_lpfc_enable_npiv,
6468 &dev_attr_lpfc_fcf_failover_policy,
6469 &dev_attr_lpfc_enable_rrq,
6470 &dev_attr_lpfc_fcp_wait_abts_rsp,
6471 &dev_attr_nport_evt_cnt,
6472 &dev_attr_board_mode,
6473 &dev_attr_max_vpi,
6474 &dev_attr_used_vpi,
6475 &dev_attr_max_rpi,
6476 &dev_attr_used_rpi,
6477 &dev_attr_max_xri,
6478 &dev_attr_used_xri,
6479 &dev_attr_npiv_info,
6480 &dev_attr_issue_reset,
6481 &dev_attr_lpfc_poll,
6482 &dev_attr_lpfc_poll_tmo,
6483 &dev_attr_lpfc_task_mgmt_tmo,
6484 &dev_attr_lpfc_use_msi,
6485 &dev_attr_lpfc_nvme_oas,
6486 &dev_attr_lpfc_nvme_embed_cmd,
6487 &dev_attr_lpfc_fcp_imax,
6488 &dev_attr_lpfc_force_rscn,
6489 &dev_attr_lpfc_cq_poll_threshold,
6490 &dev_attr_lpfc_cq_max_proc_limit,
6491 &dev_attr_lpfc_fcp_cpu_map,
6492 &dev_attr_lpfc_fcp_mq_threshold,
6493 &dev_attr_lpfc_hdw_queue,
6494 &dev_attr_lpfc_irq_chann,
6495 &dev_attr_lpfc_suppress_rsp,
6496 &dev_attr_lpfc_nvmet_mrq,
6497 &dev_attr_lpfc_nvmet_mrq_post,
6498 &dev_attr_lpfc_nvme_enable_fb,
6499 &dev_attr_lpfc_nvmet_fb_size,
6500 &dev_attr_lpfc_enable_bg,
6501 &dev_attr_lpfc_soft_wwnn,
6502 &dev_attr_lpfc_soft_wwpn,
6503 &dev_attr_lpfc_soft_wwn_enable,
6504 &dev_attr_lpfc_enable_hba_reset,
6505 &dev_attr_lpfc_enable_hba_heartbeat,
6506 &dev_attr_lpfc_EnableXLane,
6507 &dev_attr_lpfc_XLanePriority,
6508 &dev_attr_lpfc_xlane_lun,
6509 &dev_attr_lpfc_xlane_tgt,
6510 &dev_attr_lpfc_xlane_vpt,
6511 &dev_attr_lpfc_xlane_lun_state,
6512 &dev_attr_lpfc_xlane_lun_status,
6513 &dev_attr_lpfc_xlane_priority,
6514 &dev_attr_lpfc_sg_seg_cnt,
6515 &dev_attr_lpfc_max_scsicmpl_time,
6516 &dev_attr_lpfc_stat_data_ctrl,
6517 &dev_attr_lpfc_aer_support,
6518 &dev_attr_lpfc_aer_state_cleanup,
6519 &dev_attr_lpfc_sriov_nr_virtfn,
6520 &dev_attr_lpfc_req_fw_upgrade,
6521 &dev_attr_lpfc_suppress_link_up,
6522 &dev_attr_iocb_hw,
6523 &dev_attr_pls,
6524 &dev_attr_pt,
6525 &dev_attr_txq_hw,
6526 &dev_attr_txcmplq_hw,
6527 &dev_attr_lpfc_sriov_hw_max_virtfn,
6528 &dev_attr_protocol,
6529 &dev_attr_lpfc_xlane_supported,
6530 &dev_attr_lpfc_enable_mds_diags,
6531 &dev_attr_lpfc_ras_fwlog_buffsize,
6532 &dev_attr_lpfc_ras_fwlog_level,
6533 &dev_attr_lpfc_ras_fwlog_func,
6534 &dev_attr_lpfc_enable_bbcr,
6535 &dev_attr_lpfc_enable_dpp,
6536 &dev_attr_lpfc_enable_mi,
6537 &dev_attr_cmf_info,
6538 &dev_attr_lpfc_max_vmid,
6539 &dev_attr_lpfc_vmid_inactivity_timeout,
6540 &dev_attr_lpfc_vmid_app_header,
6541 &dev_attr_lpfc_vmid_priority_tagging,
6542 NULL,
6543 };
6544
6545 struct device_attribute *lpfc_vport_attrs[] = {
6546 &dev_attr_info,
6547 &dev_attr_link_state,
6548 &dev_attr_num_discovered_ports,
6549 &dev_attr_lpfc_drvr_version,
6550 &dev_attr_lpfc_log_verbose,
6551 &dev_attr_lpfc_lun_queue_depth,
6552 &dev_attr_lpfc_tgt_queue_depth,
6553 &dev_attr_lpfc_nodev_tmo,
6554 &dev_attr_lpfc_devloss_tmo,
6555 &dev_attr_lpfc_hba_queue_depth,
6556 &dev_attr_lpfc_peer_port_login,
6557 &dev_attr_lpfc_restrict_login,
6558 &dev_attr_lpfc_fcp_class,
6559 &dev_attr_lpfc_use_adisc,
6560 &dev_attr_lpfc_first_burst_size,
6561 &dev_attr_lpfc_max_luns,
6562 &dev_attr_nport_evt_cnt,
6563 &dev_attr_npiv_info,
6564 &dev_attr_lpfc_enable_da_id,
6565 &dev_attr_lpfc_max_scsicmpl_time,
6566 &dev_attr_lpfc_stat_data_ctrl,
6567 &dev_attr_lpfc_static_vport,
6568 &dev_attr_cmf_info,
6569 NULL,
6570 };
6571
6572 /**
6573 * sysfs_ctlreg_write - Write method for writing to ctlreg
6574 * @filp: open sysfs file
6575 * @kobj: kernel kobject that contains the kernel class device.
6576 * @bin_attr: kernel attributes passed to us.
6577 * @buf: contains the data to be written to the adapter IOREG space.
6578 * @off: offset into buffer to beginning of data.
6579 * @count: bytes to transfer.
6580 *
6581 * Description:
6582 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6583 * Uses the adapter io control registers to send buf contents to the adapter.
6584 *
6585 * Returns:
6586 * -ERANGE off and count combo out of range
6587  *  -EINVAL off, count or buffer address invalid
6588  *  -EPERM adapter not offline or SLI-4 adapter
6589 * value of count, buf contents written
6590 **/
6591 static ssize_t
6592 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6593 struct bin_attribute *bin_attr,
6594 char *buf, loff_t off, size_t count)
6595 {
6596 size_t buf_off;
6597 struct device *dev = container_of(kobj, struct device, kobj);
6598 struct Scsi_Host *shost = class_to_shost(dev);
6599 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6600 struct lpfc_hba *phba = vport->phba;
6601
6602 if (phba->sli_rev >= LPFC_SLI_REV4)
6603 return -EPERM;
6604
6605 if ((off + count) > FF_REG_AREA_SIZE)
6606 return -ERANGE;
6607
6608 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6609 return 0;
6610
6611 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6612 return -EINVAL;
6613
6614 /* This is to protect HBA registers from accidental writes. */
6615 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6616 return -EINVAL;
6617
6618 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6619 return -EPERM;
6620
6621 spin_lock_irq(&phba->hbalock);
6622 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6623 buf_off += sizeof(uint32_t))
6624 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6625 phba->ctrl_regs_memmap_p + off + buf_off);
6626
6627 spin_unlock_irq(&phba->hbalock);
6628
6629 return count;
6630 }
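/*
 * Illustrative user-space usage (a sketch only; the register offset below is
 * hypothetical): a write to /sys/class/scsi_host/hostN/ctlreg is honored
 * only on SLI-3 adapters while the port is offline, must be 4-byte aligned,
 * and must begin with the 4-byte "EMLX" key followed by the 32-bit words to
 * write; the file offset selects the first control-register offset:
 *
 *	char wbuf[8] = { 'E', 'M', 'L', 'X', 0x01, 0x00, 0x00, 0x00 };
 *	pwrite(fd, wbuf, sizeof(wbuf), 0x48);
 *
 * The key is verified and stripped above before the remaining words are
 * written with writel().
 */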
6631
6632 /**
6633 * sysfs_ctlreg_read - Read method for reading from ctlreg
6634 * @filp: open sysfs file
6635 * @kobj: kernel kobject that contains the kernel class device.
6636 * @bin_attr: kernel attributes passed to us.
6637 * @buf: if successful contains the data from the adapter IOREG space.
6638 * @off: offset into buffer to beginning of data.
6639 * @count: bytes to transfer.
6640 *
6641 * Description:
6642 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6643 * Uses the adapter io control registers to read data into buf.
6644 *
6645 * Returns:
6646 * -ERANGE off and count combo out of range
6647  *  -EINVAL off, count or buffer address invalid
6648 * value of count, buf contents read
6649 **/
6650 static ssize_t
6651 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6652 struct bin_attribute *bin_attr,
6653 char *buf, loff_t off, size_t count)
6654 {
6655 size_t buf_off;
6656 	uint32_t *tmp_ptr;
6657 struct device *dev = container_of(kobj, struct device, kobj);
6658 struct Scsi_Host *shost = class_to_shost(dev);
6659 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6660 struct lpfc_hba *phba = vport->phba;
6661
6662 if (phba->sli_rev >= LPFC_SLI_REV4)
6663 return -EPERM;
6664
6665 if (off > FF_REG_AREA_SIZE)
6666 return -ERANGE;
6667
6668 if ((off + count) > FF_REG_AREA_SIZE)
6669 count = FF_REG_AREA_SIZE - off;
6670
6671 if (count == 0) return 0;
6672
6673 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6674 return -EINVAL;
6675
6676 spin_lock_irq(&phba->hbalock);
6677
6678 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6679 tmp_ptr = (uint32_t *)(buf + buf_off);
6680 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6681 }
6682
6683 spin_unlock_irq(&phba->hbalock);
6684
6685 return count;
6686 }
6687
6688 static struct bin_attribute sysfs_ctlreg_attr = {
6689 .attr = {
6690 .name = "ctlreg",
6691 .mode = S_IRUSR | S_IWUSR,
6692 },
6693 .size = 256,
6694 .read = sysfs_ctlreg_read,
6695 .write = sysfs_ctlreg_write,
6696 };
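/*
 * Note: ctlreg exposes a 256-byte window (.size above) onto the SLI-3
 * control register area; both methods reject SLI-4 adapters with -EPERM.
 */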
6697
6698 /**
6699 * sysfs_mbox_write - Write method for writing information via mbox
6700 * @filp: open sysfs file
6701 * @kobj: kernel kobject that contains the kernel class device.
6702 * @bin_attr: kernel attributes passed to us.
6703 * @buf: contains the data to be written to sysfs mbox.
6704 * @off: offset into buffer to beginning of data.
6705 * @count: bytes to transfer.
6706 *
6707 * Description:
6708 * Deprecated function. All mailbox access from user space is performed via the
6709 * bsg interface.
6710 *
6711 * Returns:
6712 * -EPERM operation not permitted
6713 **/
6714 static ssize_t
6715 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6716 struct bin_attribute *bin_attr,
6717 char *buf, loff_t off, size_t count)
6718 {
6719 return -EPERM;
6720 }
6721
6722 /**
6723 * sysfs_mbox_read - Read method for reading information via mbox
6724 * @filp: open sysfs file
6725 * @kobj: kernel kobject that contains the kernel class device.
6726 * @bin_attr: kernel attributes passed to us.
6727 * @buf: contains the data to be read from sysfs mbox.
6728 * @off: offset into buffer to beginning of data.
6729 * @count: bytes to transfer.
6730 *
6731 * Description:
6732 * Deprecated function. All mailbox access from user space is performed via the
6733 * bsg interface.
6734 *
6735 * Returns:
6736 * -EPERM operation not permitted
6737 **/
6738 static ssize_t
6739 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6740 struct bin_attribute *bin_attr,
6741 char *buf, loff_t off, size_t count)
6742 {
6743 return -EPERM;
6744 }
6745
6746 static struct bin_attribute sysfs_mbox_attr = {
6747 .attr = {
6748 .name = "mbox",
6749 .mode = S_IRUSR | S_IWUSR,
6750 },
6751 .size = MAILBOX_SYSFS_MAX,
6752 .read = sysfs_mbox_read,
6753 .write = sysfs_mbox_write,
6754 };
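/*
 * The mbox node remains registered, presumably for compatibility, but both
 * methods above simply return -EPERM; mailbox access from user space is
 * handled by the FC bsg interface instead.
 */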
6755
6756 /**
6757 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6758 * @vport: address of lpfc vport structure.
6759 *
6760 * Return codes:
6761 * zero on success
6762 * error return code from sysfs_create_bin_file()
6763 **/
6764 int
6765 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6766 {
6767 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6768 int error;
6769
6770 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6771 &sysfs_drvr_stat_data_attr);
6772
6773 /* Virtual ports do not need ctrl_reg and mbox */
6774 if (error || vport->port_type == LPFC_NPIV_PORT)
6775 goto out;
6776
6777 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6778 &sysfs_ctlreg_attr);
6779 if (error)
6780 goto out_remove_stat_attr;
6781
6782 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6783 &sysfs_mbox_attr);
6784 if (error)
6785 goto out_remove_ctlreg_attr;
6786
6787 return 0;
6788 out_remove_ctlreg_attr:
6789 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6790 out_remove_stat_attr:
6791 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6792 &sysfs_drvr_stat_data_attr);
6793 out:
6794 return error;
6795 }
6796
6797 /**
6798 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6799 * @vport: address of lpfc vport structure.
6800 **/
6801 void
6802 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6803 {
6804 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6805 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6806 &sysfs_drvr_stat_data_attr);
6807 /* Virtual ports do not need ctrl_reg and mbox */
6808 if (vport->port_type == LPFC_NPIV_PORT)
6809 return;
6810 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6811 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6812 }
6813
6814 /*
6815 * Dynamic FC Host Attributes Support
6816 */
6817
6818 /**
6819 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6820 * @shost: kernel scsi host pointer.
6821 **/
6822 static void
6823 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6824 {
6825 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6826
6827 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6828 sizeof fc_host_symbolic_name(shost));
6829 }
6830
6831 /**
6832 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6833 * @shost: kernel scsi host pointer.
6834 **/
6835 static void
6836 lpfc_get_host_port_id(struct Scsi_Host *shost)
6837 {
6838 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6839
6840 /* note: fc_myDID already in cpu endianness */
6841 fc_host_port_id(shost) = vport->fc_myDID;
6842 }
6843
6844 /**
6845 * lpfc_get_host_port_type - Set the value of the scsi host port type
6846 * @shost: kernel scsi host pointer.
6847 **/
6848 static void
6849 lpfc_get_host_port_type(struct Scsi_Host *shost)
6850 {
6851 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6852 struct lpfc_hba *phba = vport->phba;
6853
6854 spin_lock_irq(shost->host_lock);
6855
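	/*
	 * Map driver state onto transport port types: NPIV vports report
	 * FC_PORTTYPE_NPIV; with the link up, loop topology yields NLPORT
	 * (public loop) or LPORT, non-loop yields NPORT (fabric) or PTP;
	 * otherwise the type is unknown.
	 */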
6856 if (vport->port_type == LPFC_NPIV_PORT) {
6857 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6858 } else if (lpfc_is_link_up(phba)) {
6859 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6860 if (vport->fc_flag & FC_PUBLIC_LOOP)
6861 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6862 else
6863 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6864 } else {
6865 if (vport->fc_flag & FC_FABRIC)
6866 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6867 else
6868 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6869 }
6870 } else
6871 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6872
6873 spin_unlock_irq(shost->host_lock);
6874 }
6875
6876 /**
6877 * lpfc_get_host_port_state - Set the value of the scsi host port state
6878 * @shost: kernel scsi host pointer.
6879 **/
6880 static void
6881 lpfc_get_host_port_state(struct Scsi_Host *shost)
6882 {
6883 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6884 struct lpfc_hba *phba = vport->phba;
6885
6886 spin_lock_irq(shost->host_lock);
6887
6888 if (vport->fc_flag & FC_OFFLINE_MODE)
6889 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6890 else {
6891 switch (phba->link_state) {
6892 case LPFC_LINK_UNKNOWN:
6893 case LPFC_LINK_DOWN:
6894 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6895 break;
6896 case LPFC_LINK_UP:
6897 case LPFC_CLEAR_LA:
6898 case LPFC_HBA_READY:
6899 			/* Link is up; report the port state accordingly */
6900 if (vport->port_state < LPFC_VPORT_READY)
6901 fc_host_port_state(shost) =
6902 FC_PORTSTATE_BYPASSED;
6903 else
6904 fc_host_port_state(shost) =
6905 FC_PORTSTATE_ONLINE;
6906 break;
6907 case LPFC_HBA_ERROR:
6908 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6909 break;
6910 default:
6911 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6912 break;
6913 }
6914 }
6915
6916 spin_unlock_irq(shost->host_lock);
6917 }
6918
6919 /**
6920 * lpfc_get_host_speed - Set the value of the scsi host speed
6921 * @shost: kernel scsi host pointer.
6922 **/
6923 static void
6924 lpfc_get_host_speed(struct Scsi_Host *shost)
6925 {
6926 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6927 struct lpfc_hba *phba = vport->phba;
6928
6929 spin_lock_irq(shost->host_lock);
6930
6931 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6932 		switch (phba->fc_linkspeed) {
6933 case LPFC_LINK_SPEED_1GHZ:
6934 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6935 break;
6936 case LPFC_LINK_SPEED_2GHZ:
6937 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6938 break;
6939 case LPFC_LINK_SPEED_4GHZ:
6940 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6941 break;
6942 case LPFC_LINK_SPEED_8GHZ:
6943 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6944 break;
6945 case LPFC_LINK_SPEED_10GHZ:
6946 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6947 break;
6948 case LPFC_LINK_SPEED_16GHZ:
6949 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6950 break;
6951 case LPFC_LINK_SPEED_32GHZ:
6952 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6953 break;
6954 case LPFC_LINK_SPEED_64GHZ:
6955 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6956 break;
6957 case LPFC_LINK_SPEED_128GHZ:
6958 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6959 break;
6960 case LPFC_LINK_SPEED_256GHZ:
6961 fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
6962 break;
6963 default:
6964 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6965 break;
6966 }
6967 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6968 switch (phba->fc_linkspeed) {
6969 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6970 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6971 break;
6972 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6973 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6974 break;
6975 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6976 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6977 break;
6978 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6979 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6980 break;
6981 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6982 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6983 break;
6984 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6985 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6986 break;
6987 default:
6988 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6989 break;
6990 }
6991 } else
6992 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6993
6994 spin_unlock_irq(shost->host_lock);
6995 }
6996
6997 /**
6998 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6999 * @shost: kernel scsi host pointer.
7000 **/
7001 static void
7002 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
7003 {
7004 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7005 struct lpfc_hba *phba = vport->phba;
7006 u64 node_name;
7007
7008 spin_lock_irq(shost->host_lock);
7009
7010 if ((vport->port_state > LPFC_FLOGI) &&
7011 ((vport->fc_flag & FC_FABRIC) ||
7012 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
7013 (vport->fc_flag & FC_PUBLIC_LOOP))))
7014 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
7015 else
7016 /* fabric is local port if there is no F/FL_Port */
7017 node_name = 0;
7018
7019 spin_unlock_irq(shost->host_lock);
7020
7021 fc_host_fabric_name(shost) = node_name;
7022 }
7023
7024 /**
7025 * lpfc_get_stats - Return statistical information about the adapter
7026 * @shost: kernel scsi host pointer.
7027 *
7028 * Notes:
7029  * NULL on error for link down, no mbox pool, SLI not active,
7030 * management not allowed, memory allocation error, or mbox error.
7031 *
7032 * Returns:
7033 * NULL for error
7034 * address of the adapter host statistics
7035 **/
7036 static struct fc_host_statistics *
7037 lpfc_get_stats(struct Scsi_Host *shost)
7038 {
7039 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7040 struct lpfc_hba *phba = vport->phba;
7041 struct lpfc_sli *psli = &phba->sli;
7042 struct fc_host_statistics *hs = &phba->link_stats;
7043 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
7044 LPFC_MBOXQ_t *pmboxq;
7045 MAILBOX_t *pmb;
7046 int rc = 0;
7047
7048 /*
7049 * prevent udev from issuing mailbox commands until the port is
7050 * configured.
7051 */
7052 if (phba->link_state < LPFC_LINK_DOWN ||
7053 !phba->mbox_mem_pool ||
7054 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
7055 return NULL;
7056
7057 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
7058 return NULL;
7059
7060 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7061 if (!pmboxq)
7062 return NULL;
7063 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
7064
7065 pmb = &pmboxq->u.mb;
7066 pmb->mbxCommand = MBX_READ_STATUS;
7067 pmb->mbxOwner = OWN_HOST;
7068 pmboxq->ctx_buf = NULL;
7069 pmboxq->vport = vport;
7070
7071 if (vport->fc_flag & FC_OFFLINE_MODE) {
7072 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7073 if (rc != MBX_SUCCESS) {
7074 mempool_free(pmboxq, phba->mbox_mem_pool);
7075 return NULL;
7076 }
7077 } else {
7078 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7079 if (rc != MBX_SUCCESS) {
7080 if (rc != MBX_TIMEOUT)
7081 mempool_free(pmboxq, phba->mbox_mem_pool);
7082 return NULL;
7083 }
7084 }
7085
7086 memset(hs, 0, sizeof (struct fc_host_statistics));
7087
7088 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
7089 	/*
7090 	 * MBX_READ_STATUS returns the transmit count in kilobytes, which
7091 	 * has to be converted to 4-byte words (1 KB = 256 words).
7092 	 */
7093 hs->tx_words = (uint64_t)
7094 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
7095 * (uint64_t)256);
7096 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
7097 hs->rx_words = (uint64_t)
7098 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
7099 * (uint64_t)256);
7100
7101 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
7102 pmb->mbxCommand = MBX_READ_LNK_STAT;
7103 pmb->mbxOwner = OWN_HOST;
7104 pmboxq->ctx_buf = NULL;
7105 pmboxq->vport = vport;
7106
7107 if (vport->fc_flag & FC_OFFLINE_MODE) {
7108 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7109 if (rc != MBX_SUCCESS) {
7110 mempool_free(pmboxq, phba->mbox_mem_pool);
7111 return NULL;
7112 }
7113 } else {
7114 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7115 if (rc != MBX_SUCCESS) {
7116 if (rc != MBX_TIMEOUT)
7117 mempool_free(pmboxq, phba->mbox_mem_pool);
7118 return NULL;
7119 }
7120 }
7121
7122 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7123 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7124 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7125 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7126 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7127 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7128 hs->error_frames = pmb->un.varRdLnk.crcCnt;
7129
7130 hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
7131 hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
7132
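	/* Report deltas relative to the baseline saved by lpfc_reset_stats() */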
7133 hs->link_failure_count -= lso->link_failure_count;
7134 hs->loss_of_sync_count -= lso->loss_of_sync_count;
7135 hs->loss_of_signal_count -= lso->loss_of_signal_count;
7136 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
7137 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
7138 hs->invalid_crc_count -= lso->invalid_crc_count;
7139 hs->error_frames -= lso->error_frames;
7140
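	/*
	 * fc_eventTag/link_events appear to advance on every link transition,
	 * so they are halved to report whole LIP/NOS events; counters that do
	 * not apply to the current topology are reported as -1.
	 */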
7141 if (phba->hba_flag & HBA_FCOE_MODE) {
7142 hs->lip_count = -1;
7143 hs->nos_count = (phba->link_events >> 1);
7144 hs->nos_count -= lso->link_events;
7145 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7146 hs->lip_count = (phba->fc_eventTag >> 1);
7147 hs->lip_count -= lso->link_events;
7148 hs->nos_count = -1;
7149 } else {
7150 hs->lip_count = -1;
7151 hs->nos_count = (phba->fc_eventTag >> 1);
7152 hs->nos_count -= lso->link_events;
7153 }
7154
7155 hs->dumped_frames = -1;
7156
7157 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
7158
7159 mempool_free(pmboxq, phba->mbox_mem_pool);
7160
7161 return hs;
7162 }
7163
7164 /**
7165 * lpfc_reset_stats - Copy the adapter link stats information
7166 * @shost: kernel scsi host pointer.
7167 **/
7168 static void
7169 lpfc_reset_stats(struct Scsi_Host *shost)
7170 {
7171 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7172 struct lpfc_hba *phba = vport->phba;
7173 struct lpfc_sli *psli = &phba->sli;
7174 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
7175 LPFC_MBOXQ_t *pmboxq;
7176 MAILBOX_t *pmb;
7177 int rc = 0;
7178
7179 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
7180 return;
7181
7182 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7183 if (!pmboxq)
7184 return;
7185 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
7186
7187 pmb = &pmboxq->u.mb;
7188 pmb->mbxCommand = MBX_READ_STATUS;
7189 pmb->mbxOwner = OWN_HOST;
7190 pmb->un.varWords[0] = 0x1; /* reset request */
7191 pmboxq->ctx_buf = NULL;
7192 pmboxq->vport = vport;
7193
7194 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
7195 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
7196 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7197 if (rc != MBX_SUCCESS) {
7198 mempool_free(pmboxq, phba->mbox_mem_pool);
7199 return;
7200 }
7201 } else {
7202 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7203 if (rc != MBX_SUCCESS) {
7204 if (rc != MBX_TIMEOUT)
7205 mempool_free(pmboxq, phba->mbox_mem_pool);
7206 return;
7207 }
7208 }
7209
7210 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
7211 pmb->mbxCommand = MBX_READ_LNK_STAT;
7212 pmb->mbxOwner = OWN_HOST;
7213 pmboxq->ctx_buf = NULL;
7214 pmboxq->vport = vport;
7215
7216 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
7217 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
7218 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7219 if (rc != MBX_SUCCESS) {
7220 mempool_free(pmboxq, phba->mbox_mem_pool);
7221 return;
7222 }
7223 } else {
7224 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7225 if (rc != MBX_SUCCESS) {
7226 if (rc != MBX_TIMEOUT)
7227 mempool_free(pmboxq, phba->mbox_mem_pool);
7228 return;
7229 }
7230 }
7231
7232 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7233 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7234 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7235 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7236 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7237 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7238 lso->error_frames = pmb->un.varRdLnk.crcCnt;
7239 if (phba->hba_flag & HBA_FCOE_MODE)
7240 lso->link_events = (phba->link_events >> 1);
7241 else
7242 lso->link_events = (phba->fc_eventTag >> 1);
7243
7244 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7245 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7246
7247 memset(&shost_to_fc_host(shost)->fpin_stats, 0,
7248 sizeof(shost_to_fc_host(shost)->fpin_stats));
7249
7250 psli->stats_start = ktime_get_seconds();
7251
7252 mempool_free(pmboxq, phba->mbox_mem_pool);
7253
7254 return;
7255 }
7256
7257 /*
7258 * The LPFC driver treats linkdown handling as target loss events so there
7259 * are no sysfs handlers for link_down_tmo.
7260 */
7261
7262 /**
7263 * lpfc_get_node_by_target - Return the nodelist for a target
7264 * @starget: kernel scsi target pointer.
7265 *
7266 * Returns:
7267 * address of the node list if found
7268 * NULL target not found
7269 **/
7270 static struct lpfc_nodelist *
7271 lpfc_get_node_by_target(struct scsi_target *starget)
7272 {
7273 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
7274 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7275 struct lpfc_nodelist *ndlp;
7276
7277 spin_lock_irq(shost->host_lock);
7278 /* Search for this, mapped, target ID */
7279 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7280 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7281 starget->id == ndlp->nlp_sid) {
7282 spin_unlock_irq(shost->host_lock);
7283 return ndlp;
7284 }
7285 }
7286 spin_unlock_irq(shost->host_lock);
7287 return NULL;
7288 }
7289
7290 /**
7291 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7292 * @starget: kernel scsi target pointer.
7293 **/
7294 static void
7295 lpfc_get_starget_port_id(struct scsi_target *starget)
7296 {
7297 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7298
7299 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7300 }
7301
7302 /**
7303 * lpfc_get_starget_node_name - Set the target node name
7304 * @starget: kernel scsi target pointer.
7305 *
7306 * Description: Set the target node name to the ndlp node name wwn or zero.
7307 **/
7308 static void
7309 lpfc_get_starget_node_name(struct scsi_target *starget)
7310 {
7311 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7312
7313 fc_starget_node_name(starget) =
7314 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7315 }
7316
7317 /**
7318 * lpfc_get_starget_port_name - Set the target port name
7319 * @starget: kernel scsi target pointer.
7320 *
7321  * Description: Set the target port name to the ndlp port name wwn or zero.
7322 **/
7323 static void
7324 lpfc_get_starget_port_name(struct scsi_target *starget)
7325 {
7326 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7327
7328 fc_starget_port_name(starget) =
7329 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7330 }
7331
7332 /**
7333 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7334 * @rport: fc rport address.
7335 * @timeout: new value for dev loss tmo.
7336 *
7337 * Description:
7338 * If timeout is non zero set the dev_loss_tmo to timeout, else set
7339 * dev_loss_tmo to one.
7340 **/
7341 static void
7342 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7343 {
7344 struct lpfc_rport_data *rdata = rport->dd_data;
7345 struct lpfc_nodelist *ndlp = rdata->pnode;
7346 #if (IS_ENABLED(CONFIG_NVME_FC))
7347 struct lpfc_nvme_rport *nrport = NULL;
7348 #endif
7349
7350 if (timeout)
7351 rport->dev_loss_tmo = timeout;
7352 else
7353 rport->dev_loss_tmo = 1;
7354
7355 if (!ndlp) {
7356 dev_info(&rport->dev, "Cannot find remote node to "
7357 "set rport dev loss tmo, port_id x%x\n",
7358 rport->port_id);
7359 return;
7360 }
7361
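	/* Mirror the new dev_loss_tmo to the NVMe-FC remote port, if any */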
7362 #if (IS_ENABLED(CONFIG_NVME_FC))
7363 nrport = lpfc_ndlp_get_nrport(ndlp);
7364
7365 if (nrport && nrport->remoteport)
7366 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7367 rport->dev_loss_tmo);
7368 #endif
7369 }
7370
7371 /*
7372 * lpfc_rport_show_function - Return rport target information
7373 *
7374 * Description:
7375  * Macro that uses @field to generate a show function named lpfc_show_rport_<field>.
7376 *
7377 * lpfc_show_rport_##field: returns the bytes formatted in buf
7378 * @cdev: class converted to an fc_rport.
7379 * @buf: on return contains the target_field or zero.
7380 *
7381 * Returns: size of formatted string.
7382 **/
7383 #define lpfc_rport_show_function(field, format_string, sz, cast) \
7384 static ssize_t \
7385 lpfc_show_rport_##field (struct device *dev, \
7386 struct device_attribute *attr, \
7387 char *buf) \
7388 { \
7389 struct fc_rport *rport = transport_class_to_rport(dev); \
7390 struct lpfc_rport_data *rdata = rport->hostdata; \
7391 return scnprintf(buf, sz, format_string, \
7392 (rdata->target) ? cast rdata->target->field : 0); \
7393 }
7394
7395 #define lpfc_rport_rd_attr(field, format_string, sz) \
7396 lpfc_rport_show_function(field, format_string, sz, ) \
7397 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
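/*
 * For illustration only (a hypothetical invocation, not necessarily one the
 * driver makes):
 *
 *	lpfc_rport_rd_attr(maxframe_size, "%u\n", 20)
 *
 * would generate lpfc_show_rport_maxframe_size(), which formats
 * rdata->target->maxframe_size into buf, plus a read-only FC_RPORT_ATTR
 * named "maxframe_size".
 */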
7398
7399 /**
7400 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7401  * @fc_vport: The fc_vport whose symbolic name has been changed.
7402 *
7403 * Description:
7404 * This function is called by the transport after the @fc_vport's symbolic name
7405 * has been changed. This function re-registers the symbolic name with the
7406 * switch to propagate the change into the fabric if the vport is active.
7407 **/
7408 static void
7409 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7410 {
7411 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7412
7413 if (vport->port_state == LPFC_VPORT_READY)
7414 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7415 }
7416
7417 /**
7418 * lpfc_hba_log_verbose_init - Set hba's log verbose level
7419 * @phba: Pointer to lpfc_hba struct.
7420 * @verbose: Verbose level to set.
7421 *
7422  * This function is called by the lpfc_get_cfgparam() routine to copy the
7423  * module parameter lpfc_log_verbose into @phba's cfg_log_verbose so that
7424  * log messages are filtered correctly before any HBA port or vport is
7425  * created.
7426 **/
7427 static void
7428 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7429 {
7430 phba->cfg_log_verbose = verbose;
7431 }
7432
7433 struct fc_function_template lpfc_transport_functions = {
7434 /* fixed attributes the driver supports */
7435 .show_host_node_name = 1,
7436 .show_host_port_name = 1,
7437 .show_host_supported_classes = 1,
7438 .show_host_supported_fc4s = 1,
7439 .show_host_supported_speeds = 1,
7440 .show_host_maxframe_size = 1,
7441
7442 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7443 .show_host_symbolic_name = 1,
7444
7445 /* dynamic attributes the driver supports */
7446 .get_host_port_id = lpfc_get_host_port_id,
7447 .show_host_port_id = 1,
7448
7449 .get_host_port_type = lpfc_get_host_port_type,
7450 .show_host_port_type = 1,
7451
7452 .get_host_port_state = lpfc_get_host_port_state,
7453 .show_host_port_state = 1,
7454
7455 /* active_fc4s is shown but doesn't change (thus no get function) */
7456 .show_host_active_fc4s = 1,
7457
7458 .get_host_speed = lpfc_get_host_speed,
7459 .show_host_speed = 1,
7460
7461 .get_host_fabric_name = lpfc_get_host_fabric_name,
7462 .show_host_fabric_name = 1,
7463
7464 /*
7465 * The LPFC driver treats linkdown handling as target loss events
7466 * so there are no sysfs handlers for link_down_tmo.
7467 */
7468
7469 .get_fc_host_stats = lpfc_get_stats,
7470 .reset_fc_host_stats = lpfc_reset_stats,
7471
7472 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7473 .show_rport_maxframe_size = 1,
7474 .show_rport_supported_classes = 1,
7475
7476 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7477 .show_rport_dev_loss_tmo = 1,
7478
7479 .get_starget_port_id = lpfc_get_starget_port_id,
7480 .show_starget_port_id = 1,
7481
7482 .get_starget_node_name = lpfc_get_starget_node_name,
7483 .show_starget_node_name = 1,
7484
7485 .get_starget_port_name = lpfc_get_starget_port_name,
7486 .show_starget_port_name = 1,
7487
7488 .issue_fc_host_lip = lpfc_issue_lip,
7489 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7490 .terminate_rport_io = lpfc_terminate_rport_io,
7491
7492 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7493
7494 .vport_disable = lpfc_vport_disable,
7495
7496 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7497
7498 .bsg_request = lpfc_bsg_request,
7499 .bsg_timeout = lpfc_bsg_timeout,
7500 };
7501
7502 struct fc_function_template lpfc_vport_transport_functions = {
7503 /* fixed attributes the driver supports */
7504 .show_host_node_name = 1,
7505 .show_host_port_name = 1,
7506 .show_host_supported_classes = 1,
7507 .show_host_supported_fc4s = 1,
7508 .show_host_supported_speeds = 1,
7509 .show_host_maxframe_size = 1,
7510
7511 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7512 .show_host_symbolic_name = 1,
7513
7514 /* dynamic attributes the driver supports */
7515 .get_host_port_id = lpfc_get_host_port_id,
7516 .show_host_port_id = 1,
7517
7518 .get_host_port_type = lpfc_get_host_port_type,
7519 .show_host_port_type = 1,
7520
7521 .get_host_port_state = lpfc_get_host_port_state,
7522 .show_host_port_state = 1,
7523
7524 /* active_fc4s is shown but doesn't change (thus no get function) */
7525 .show_host_active_fc4s = 1,
7526
7527 .get_host_speed = lpfc_get_host_speed,
7528 .show_host_speed = 1,
7529
7530 .get_host_fabric_name = lpfc_get_host_fabric_name,
7531 .show_host_fabric_name = 1,
7532
7533 /*
7534 * The LPFC driver treats linkdown handling as target loss events
7535 * so there are no sysfs handlers for link_down_tmo.
7536 */
7537
7538 .get_fc_host_stats = lpfc_get_stats,
7539 .reset_fc_host_stats = lpfc_reset_stats,
7540
7541 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7542 .show_rport_maxframe_size = 1,
7543 .show_rport_supported_classes = 1,
7544
7545 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7546 .show_rport_dev_loss_tmo = 1,
7547
7548 .get_starget_port_id = lpfc_get_starget_port_id,
7549 .show_starget_port_id = 1,
7550
7551 .get_starget_node_name = lpfc_get_starget_node_name,
7552 .show_starget_node_name = 1,
7553
7554 .get_starget_port_name = lpfc_get_starget_port_name,
7555 .show_starget_port_name = 1,
7556
7557 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7558 .terminate_rport_io = lpfc_terminate_rport_io,
7559
7560 .vport_disable = lpfc_vport_disable,
7561
7562 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7563 };
7564
7565 /**
7566 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
7567 * Mode
7568 * @phba: lpfc_hba pointer.
7569 **/
7570 static void
7571 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7572 {
7573 /* If the adapter supports FCoE mode */
7574 switch (phba->pcidev->device) {
7575 case PCI_DEVICE_ID_SKYHAWK:
7576 case PCI_DEVICE_ID_SKYHAWK_VF:
7577 case PCI_DEVICE_ID_LANCER_FCOE:
7578 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7579 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7580 case PCI_DEVICE_ID_HORNET:
7581 case PCI_DEVICE_ID_TIGERSHARK:
7582 case PCI_DEVICE_ID_TOMCAT:
7583 phba->hba_flag |= HBA_FCOE_MODE;
7584 break;
7585 default:
7586 /* for others, clear the flag */
7587 phba->hba_flag &= ~HBA_FCOE_MODE;
7588 }
7589 }
7590
7591 /**
7592 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7593 * @phba: lpfc_hba pointer.
7594 **/
7595 void
7596 lpfc_get_cfgparam(struct lpfc_hba *phba)
7597 {
7598 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7599 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7600 lpfc_ns_query_init(phba, lpfc_ns_query);
7601 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7602 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7603 lpfc_cr_count_init(phba, lpfc_cr_count);
7604 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7605 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7606 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7607 lpfc_ack0_init(phba, lpfc_ack0);
7608 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7609 lpfc_topology_init(phba, lpfc_topology);
7610 lpfc_link_speed_init(phba, lpfc_link_speed);
7611 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7612 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7613 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7614 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7615 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7616 lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
7617 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7618 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7619 lpfc_use_msi_init(phba, lpfc_use_msi);
7620 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7621 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7622 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7623 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7624 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7625 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7626 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7627 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7628 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7629
7630 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7631 /* VMID Inits */
7632 lpfc_max_vmid_init(phba, lpfc_max_vmid);
7633 lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
7634 lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
7635 lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
7636 if (phba->sli_rev != LPFC_SLI_REV4)
7637 phba->cfg_EnableXLane = 0;
7638 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7639
7640 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7641 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7642 phba->cfg_oas_lun_state = 0;
7643 phba->cfg_oas_lun_status = 0;
7644 phba->cfg_oas_flags = 0;
7645 phba->cfg_oas_priority = 0;
7646 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7647 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7648 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7649 if (phba->sli_rev == LPFC_SLI_REV4)
7650 phba->cfg_poll = 0;
7651 else
7652 phba->cfg_poll = lpfc_poll;
7653
7654 /* Get the function mode */
7655 lpfc_get_hba_function_mode(phba);
7656
7657 /* BlockGuard allowed for FC only. */
7658 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7659 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7660 "0581 BlockGuard feature not supported\n");
7661 /* If set, clear the BlockGuard support param */
7662 phba->cfg_enable_bg = 0;
7663 } else if (phba->cfg_enable_bg) {
7664 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7665 }
7666
7667 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7668
7669 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7670 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7671 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7672
7673 /* Initialize first burst. Target vs Initiator are different. */
7674 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7675 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7676 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7677 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7678 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7679 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7680 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7681 lpfc_enable_mi_init(phba, lpfc_enable_mi);
7682
7683 phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
7684 phba->cmf_active_mode = LPFC_CFG_OFF;
7685 if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
7686 lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
7687 lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
7688
7689 if (phba->sli_rev != LPFC_SLI_REV4) {
7690 /* NVME only supported on SLI4 */
7691 phba->nvmet_support = 0;
7692 phba->cfg_nvmet_mrq = 0;
7693 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7694 phba->cfg_enable_bbcr = 0;
7695 phba->cfg_xri_rebalancing = 0;
7696 } else {
7697 /* We MUST have FCP support */
7698 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7699 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7700 }
7701
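	/* auto_imax is enabled only when cfg_fcp_imax is left at zero */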
7702 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7703
7704 phba->cfg_enable_pbde = 0;
7705
7706 /* A value of 0 means use the number of CPUs found in the system */
7707 if (phba->cfg_hdw_queue == 0)
7708 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7709 if (phba->cfg_irq_chann == 0)
7710 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
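	/* On SLI-4, never use more IRQ channels than hardware queues */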
7711 if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
7712 phba->sli_rev == LPFC_SLI_REV4)
7713 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7714
7715 phba->cfg_soft_wwnn = 0L;
7716 phba->cfg_soft_wwpn = 0L;
7717 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7718 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7719 lpfc_aer_support_init(phba, lpfc_aer_support);
7720 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7721 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7722 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7723 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7724 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7725 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7726 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7727 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7728 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7729
7730 return;
7731 }
7732
7733 /**
7734 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7735 * dependencies between protocols and roles.
7736 * @phba: lpfc_hba pointer.
7737 **/
7738 void
7739 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7740 {
7741 int logit = 0;
7742
7743 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7744 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7745 logit = 1;
7746 }
7747 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7748 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7749 logit = 1;
7750 }
7751 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7752 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7753 logit = 1;
7754 }
7755 if (logit)
7756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7757 "2006 Reducing Queues - CPU limitation: "
7758 "IRQ %d HDWQ %d\n",
7759 phba->cfg_irq_chann,
7760 phba->cfg_hdw_queue);
7761
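	/*
	 * NVMe target mode drops FCP initiator support and sizes the target
	 * first-burst and MRQ parameters; otherwise all target parameters are
	 * cleared below.
	 */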
7762 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7763 phba->nvmet_support) {
7764 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7765
7766 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7767 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7768 "NVME Target PRLI ACC enable_fb ",
7769 phba->cfg_nvme_enable_fb,
7770 phba->cfg_nvmet_fb_size,
7771 LPFC_NVMET_FB_SZ_MAX);
7772
7773 if (phba->cfg_nvme_enable_fb == 0)
7774 phba->cfg_nvmet_fb_size = 0;
7775 else {
7776 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7777 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7778 }
7779
7780 if (!phba->cfg_nvmet_mrq)
7781 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7782
7783 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7784 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7785 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7786 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7787 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7788 phba->cfg_nvmet_mrq);
7789 }
7790 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7791 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7792
7793 } else {
7794 /* Not NVME Target mode. Turn off Target parameters. */
7795 phba->nvmet_support = 0;
7796 phba->cfg_nvmet_mrq = 0;
7797 phba->cfg_nvmet_fb_size = 0;
7798 }
7799 }
7800
7801 /**
7802 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7803 * @vport: lpfc_vport pointer.
7804 **/
7805 void
7806 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7807 {
7808 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7809 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7810 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7811 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7812 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7813 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7814 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7815 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7816 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7817 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7818 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7819 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7820 lpfc_max_luns_init(vport, lpfc_max_luns);
7821 lpfc_scan_down_init(vport, lpfc_scan_down);
7822 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7823 return;
7824 }
7825