1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32 
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39 
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55 
56 #define LPFC_DEF_DEVLOSS_TMO	30
57 #define LPFC_MIN_DEVLOSS_TMO	1
58 #define LPFC_MAX_DEVLOSS_TMO	255
59 
60 #define LPFC_DEF_MRQ_POST	512
61 #define LPFC_MIN_MRQ_POST	512
62 #define LPFC_MAX_MRQ_POST	2048
63 
64 /*
65  * Write key size should be multiple of 4. If write key is changed
66  * make sure that library write key is also changed.
67  */
68 #define LPFC_REG_WRITE_KEY_SIZE	4
69 #define LPFC_REG_WRITE_KEY	"EMLX"
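/*
 * Note: "EMLX" is exactly LPFC_REG_WRITE_KEY_SIZE (4) bytes, which keeps the
 * multiple-of-4 rule above satisfied.
 */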
70 
71 const char *const trunk_errmsg[] = {	/* map errcode */
72 	"",	/* There is no such error code at index 0*/
73 	"link negotiated speed does not match existing"
74 		" trunk - link was \"low\" speed",
75 	"link negotiated speed does not match"
76 		" existing trunk - link was \"middle\" speed",
77 	"link negotiated speed does not match existing"
78 		" trunk - link was \"high\" speed",
79 	"Attached to non-trunking port - F_Port",
80 	"Attached to non-trunking port - N_Port",
81 	"FLOGI response timeout",
82 	"non-FLOGI frame received",
83 	"Invalid FLOGI response",
84 	"Trunking initialization protocol",
85 	"Trunk peer device mismatch",
86 };
87 
88 /**
89  * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
90  * @incr: integer to convert.
91  * @hdw: ascii string holding converted integer plus a string terminator.
92  *
93  * Description:
94  * JEDEC Joint Electron Device Engineering Council.
95  * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
96  * character string. The string is then terminated with a NULL in byte 9.
97  * Hex 0-9 becomes ascii '0' to '9'.
98  * Hex a-f becomes ascii 'a' to 'f' lowercase.
99  *
100  * Notes:
101  * Coded for 32 bit integers only.
102  **/
103 static void
104 lpfc_jedec_to_ascii(int incr, char hdw[])
105 {
106 	int i, j;
107 	for (i = 0; i < 8; i++) {
108 		j = (incr & 0xf);
109 		if (j <= 9)
110 			hdw[7 - i] = 0x30 +  j;
111 		 else
112 			hdw[7 - i] = 0x61 + j - 10;
113 		incr = (incr >> 4);
114 	}
115 	hdw[8] = 0;
116 	return;
117 }
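/*
 * Illustrative example (not part of the original source): incr = 0x0123abcd
 * yields hdw = "0123abcd", with hdw[8] set to the NUL terminator.
 */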
118 
119 /**
120  * lpfc_drvr_version_show - Return the Emulex driver string with version number
121  * @dev: class unused variable.
122  * @attr: device attribute, not used.
123  * @buf: on return contains the module description text.
124  *
125  * Returns: size of formatted string.
126  **/
127 static ssize_t
128 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
129 		       char *buf)
130 {
131 	return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
132 }
133 
134 /**
135  * lpfc_enable_fip_show - Return the fip mode of the HBA
136  * @dev: class unused variable.
137  * @attr: device attribute, not used.
138  * @buf: on return contains "1" if FIP is supported, "0" otherwise.
139  *
140  * Returns: size of formatted string.
141  **/
142 static ssize_t
143 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
144 		       char *buf)
145 {
146 	struct Scsi_Host *shost = class_to_shost(dev);
147 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
148 	struct lpfc_hba   *phba = vport->phba;
149 
150 	if (phba->hba_flag & HBA_FIP_SUPPORT)
151 		return scnprintf(buf, PAGE_SIZE, "1\n");
152 	else
153 		return scnprintf(buf, PAGE_SIZE, "0\n");
154 }
155 
156 static ssize_t
157 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
158 		    char *buf)
159 {
160 	struct Scsi_Host *shost = class_to_shost(dev);
161 	struct lpfc_vport *vport = shost_priv(shost);
162 	struct lpfc_hba   *phba = vport->phba;
163 	struct lpfc_nvmet_tgtport *tgtp;
164 	struct nvme_fc_local_port *localport;
165 	struct lpfc_nvme_lport *lport;
166 	struct lpfc_nvme_rport *rport;
167 	struct lpfc_nodelist *ndlp;
168 	struct nvme_fc_remote_port *nrport;
169 	struct lpfc_fc4_ctrl_stat *cstat;
170 	uint64_t data1, data2, data3;
171 	uint64_t totin, totout, tot;
172 	char *statep;
173 	int i;
174 	int len = 0;
175 	char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
176 
177 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
178 		len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
179 		return len;
180 	}
181 	if (phba->nvmet_support) {
182 		if (!phba->targetport) {
183 			len = scnprintf(buf, PAGE_SIZE,
184 					"NVME Target: x%llx is not allocated\n",
185 					wwn_to_u64(vport->fc_portname.u.wwn));
186 			return len;
187 		}
188 		/* Port state is only one of two values for now. */
189 		if (phba->targetport->port_id)
190 			statep = "REGISTERED";
191 		else
192 			statep = "INIT";
193 		scnprintf(tmp, sizeof(tmp),
194 			  "NVME Target Enabled  State %s\n",
195 			  statep);
196 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
197 			goto buffer_done;
198 
199 		scnprintf(tmp, sizeof(tmp),
200 			  "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
201 			  "NVME Target: lpfc",
202 			  phba->brd_no,
203 			  wwn_to_u64(vport->fc_portname.u.wwn),
204 			  wwn_to_u64(vport->fc_nodename.u.wwn),
205 			  phba->targetport->port_id);
206 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
207 			goto buffer_done;
208 
209 		if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
210 		    >= PAGE_SIZE)
211 			goto buffer_done;
212 
213 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
214 		scnprintf(tmp, sizeof(tmp),
215 			  "LS: Rcv %08x Drop %08x Abort %08x\n",
216 			  atomic_read(&tgtp->rcv_ls_req_in),
217 			  atomic_read(&tgtp->rcv_ls_req_drop),
218 			  atomic_read(&tgtp->xmt_ls_abort));
219 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
220 			goto buffer_done;
221 
222 		if (atomic_read(&tgtp->rcv_ls_req_in) !=
223 		    atomic_read(&tgtp->rcv_ls_req_out)) {
224 			scnprintf(tmp, sizeof(tmp),
225 				  "Rcv LS: in %08x != out %08x\n",
226 				  atomic_read(&tgtp->rcv_ls_req_in),
227 				  atomic_read(&tgtp->rcv_ls_req_out));
228 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
229 				goto buffer_done;
230 		}
231 
232 		scnprintf(tmp, sizeof(tmp),
233 			  "LS: Xmt %08x Drop %08x Cmpl %08x\n",
234 			  atomic_read(&tgtp->xmt_ls_rsp),
235 			  atomic_read(&tgtp->xmt_ls_drop),
236 			  atomic_read(&tgtp->xmt_ls_rsp_cmpl));
237 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
238 			goto buffer_done;
239 
240 		scnprintf(tmp, sizeof(tmp),
241 			  "LS: RSP Abort %08x xb %08x Err %08x\n",
242 			  atomic_read(&tgtp->xmt_ls_rsp_aborted),
243 			  atomic_read(&tgtp->xmt_ls_rsp_xb_set),
244 			  atomic_read(&tgtp->xmt_ls_rsp_error));
245 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
246 			goto buffer_done;
247 
248 		scnprintf(tmp, sizeof(tmp),
249 			  "FCP: Rcv %08x Defer %08x Release %08x "
250 			  "Drop %08x\n",
251 			  atomic_read(&tgtp->rcv_fcp_cmd_in),
252 			  atomic_read(&tgtp->rcv_fcp_cmd_defer),
253 			  atomic_read(&tgtp->xmt_fcp_release),
254 			  atomic_read(&tgtp->rcv_fcp_cmd_drop));
255 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
256 			goto buffer_done;
257 
258 		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
259 		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
260 			scnprintf(tmp, sizeof(tmp),
261 				  "Rcv FCP: in %08x != out %08x\n",
262 				  atomic_read(&tgtp->rcv_fcp_cmd_in),
263 				  atomic_read(&tgtp->rcv_fcp_cmd_out));
264 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
265 				goto buffer_done;
266 		}
267 
268 		scnprintf(tmp, sizeof(tmp),
269 			  "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
270 			  "drop %08x\n",
271 			  atomic_read(&tgtp->xmt_fcp_read),
272 			  atomic_read(&tgtp->xmt_fcp_read_rsp),
273 			  atomic_read(&tgtp->xmt_fcp_write),
274 			  atomic_read(&tgtp->xmt_fcp_rsp),
275 			  atomic_read(&tgtp->xmt_fcp_drop));
276 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
277 			goto buffer_done;
278 
279 		scnprintf(tmp, sizeof(tmp),
280 			  "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
281 			  atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
282 			  atomic_read(&tgtp->xmt_fcp_rsp_error),
283 			  atomic_read(&tgtp->xmt_fcp_rsp_drop));
284 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
285 			goto buffer_done;
286 
287 		scnprintf(tmp, sizeof(tmp),
288 			  "FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
289 			  atomic_read(&tgtp->xmt_fcp_rsp_aborted),
290 			  atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
291 			  atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
292 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
293 			goto buffer_done;
294 
295 		scnprintf(tmp, sizeof(tmp),
296 			  "ABORT: Xmt %08x Cmpl %08x\n",
297 			  atomic_read(&tgtp->xmt_fcp_abort),
298 			  atomic_read(&tgtp->xmt_fcp_abort_cmpl));
299 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
300 			goto buffer_done;
301 
302 		scnprintf(tmp, sizeof(tmp),
303 			  "ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x\n",
304 			  atomic_read(&tgtp->xmt_abort_sol),
305 			  atomic_read(&tgtp->xmt_abort_unsol),
306 			  atomic_read(&tgtp->xmt_abort_rsp),
307 			  atomic_read(&tgtp->xmt_abort_rsp_error));
308 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
309 			goto buffer_done;
310 
311 		scnprintf(tmp, sizeof(tmp),
312 			  "DELAY: ctx %08x  fod %08x wqfull %08x\n",
313 			  atomic_read(&tgtp->defer_ctx),
314 			  atomic_read(&tgtp->defer_fod),
315 			  atomic_read(&tgtp->defer_wqfull));
316 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
317 			goto buffer_done;
318 
319 		/* Calculate outstanding IOs */
320 		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
321 		tot += atomic_read(&tgtp->xmt_fcp_release);
322 		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
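		/*
		 * i.e. outstanding = commands received - (dropped + released).
		 * The counters are read individually, so this is a best-effort
		 * snapshot rather than an atomic total.
		 */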
323 
324 		scnprintf(tmp, sizeof(tmp),
325 			  "IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
326 			  "CTX Outstanding %08llx\n\n",
327 			  phba->sli4_hba.nvmet_xri_cnt,
328 			  phba->sli4_hba.nvmet_io_wait_cnt,
329 			  phba->sli4_hba.nvmet_io_wait_total,
330 			  tot);
331 		strlcat(buf, tmp, PAGE_SIZE);
332 		goto buffer_done;
333 	}
334 
335 	localport = vport->localport;
336 	if (!localport) {
337 		len = scnprintf(buf, PAGE_SIZE,
338 				"NVME Initiator x%llx is not allocated\n",
339 				wwn_to_u64(vport->fc_portname.u.wwn));
340 		return len;
341 	}
342 	lport = (struct lpfc_nvme_lport *)localport->private;
343 	if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
344 		goto buffer_done;
345 
346 	scnprintf(tmp, sizeof(tmp),
347 		  "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
348 		  phba->brd_no,
349 		  phba->sli4_hba.max_cfg_param.max_xri,
350 		  phba->sli4_hba.io_xri_max,
351 		  lpfc_sli4_get_els_iocb_cnt(phba));
352 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
353 		goto buffer_done;
354 
355 	/* Port state is only one of two values for now. */
356 	if (localport->port_id)
357 		statep = "ONLINE";
358 	else
359 		statep = "UNKNOWN ";
360 
361 	scnprintf(tmp, sizeof(tmp),
362 		  "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
363 		  "NVME LPORT lpfc",
364 		  phba->brd_no,
365 		  wwn_to_u64(vport->fc_portname.u.wwn),
366 		  wwn_to_u64(vport->fc_nodename.u.wwn),
367 		  localport->port_id, statep);
368 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
369 		goto buffer_done;
370 
371 	spin_lock_irq(shost->host_lock);
372 
373 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
374 		nrport = NULL;
375 		spin_lock(&vport->phba->hbalock);
376 		rport = lpfc_ndlp_get_nrport(ndlp);
377 		if (rport)
378 			nrport = rport->remoteport;
379 		spin_unlock(&vport->phba->hbalock);
380 		if (!nrport)
381 			continue;
382 
383 		/* Port state is only one of two values for now. */
384 		switch (nrport->port_state) {
385 		case FC_OBJSTATE_ONLINE:
386 			statep = "ONLINE";
387 			break;
388 		case FC_OBJSTATE_UNKNOWN:
389 			statep = "UNKNOWN ";
390 			break;
391 		default:
392 			statep = "UNSUPPORTED";
393 			break;
394 		}
395 
396 		/* Tab in to show lport ownership. */
397 		if (strlcat(buf, "NVME RPORT       ", PAGE_SIZE) >= PAGE_SIZE)
398 			goto unlock_buf_done;
399 		if (phba->brd_no >= 10) {
400 			if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
401 				goto unlock_buf_done;
402 		}
403 
404 		scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
405 			  nrport->port_name);
406 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
407 			goto unlock_buf_done;
408 
409 		scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
410 			  nrport->node_name);
411 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
412 			goto unlock_buf_done;
413 
414 		scnprintf(tmp, sizeof(tmp), "DID x%06x ",
415 			  nrport->port_id);
416 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
417 			goto unlock_buf_done;
418 
419 		/* An NVME rport can have multiple roles. */
420 		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
421 			if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
422 				goto unlock_buf_done;
423 		}
424 		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
425 			if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
426 				goto unlock_buf_done;
427 		}
428 		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
429 			if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
430 				goto unlock_buf_done;
431 		}
432 		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
433 					  FC_PORT_ROLE_NVME_TARGET |
434 					  FC_PORT_ROLE_NVME_DISCOVERY)) {
435 			scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
436 				  nrport->port_role);
437 			if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
438 				goto unlock_buf_done;
439 		}
440 
441 		scnprintf(tmp, sizeof(tmp), "%s\n", statep);
442 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
443 			goto unlock_buf_done;
444 	}
445 	spin_unlock_irq(shost->host_lock);
446 
447 	if (!lport)
448 		goto buffer_done;
449 
450 	if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
451 		goto buffer_done;
452 
453 	scnprintf(tmp, sizeof(tmp),
454 		  "LS: Xmt %010x Cmpl %010x Abort %08x\n",
455 		  atomic_read(&lport->fc4NvmeLsRequests),
456 		  atomic_read(&lport->fc4NvmeLsCmpls),
457 		  atomic_read(&lport->xmt_ls_abort));
458 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
459 		goto buffer_done;
460 
461 	scnprintf(tmp, sizeof(tmp),
462 		  "LS XMIT: Err %08x  CMPL: xb %08x Err %08x\n",
463 		  atomic_read(&lport->xmt_ls_err),
464 		  atomic_read(&lport->cmpl_ls_xb),
465 		  atomic_read(&lport->cmpl_ls_err));
466 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
467 		goto buffer_done;
468 
469 	totin = 0;
470 	totout = 0;
471 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
472 		cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
473 		tot = cstat->io_cmpls;
474 		totin += tot;
475 		data1 = cstat->input_requests;
476 		data2 = cstat->output_requests;
477 		data3 = cstat->control_requests;
478 		totout += (data1 + data2 + data3);
479 	}
480 	scnprintf(tmp, sizeof(tmp),
481 		  "Total FCP Cmpl %016llx Issue %016llx "
482 		  "OutIO %016llx\n",
483 		  totin, totout, totout - totin);
484 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
485 		goto buffer_done;
486 
487 	scnprintf(tmp, sizeof(tmp),
488 		  "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
489 		  "wqerr %08x err %08x\n",
490 		  atomic_read(&lport->xmt_fcp_abort),
491 		  atomic_read(&lport->xmt_fcp_noxri),
492 		  atomic_read(&lport->xmt_fcp_bad_ndlp),
493 		  atomic_read(&lport->xmt_fcp_qdepth),
494 		  atomic_read(&lport->xmt_fcp_err),
495 		  atomic_read(&lport->xmt_fcp_wqerr));
496 	if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
497 		goto buffer_done;
498 
499 	scnprintf(tmp, sizeof(tmp),
500 		  "FCP CMPL: xb %08x Err %08x\n",
501 		  atomic_read(&lport->cmpl_fcp_xb),
502 		  atomic_read(&lport->cmpl_fcp_err));
503 	strlcat(buf, tmp, PAGE_SIZE);
504 
505 	/* host_lock is already unlocked. */
506 	goto buffer_done;
507 
508  unlock_buf_done:
509 	spin_unlock_irq(shost->host_lock);
510 
511  buffer_done:
512 	len = strnlen(buf, PAGE_SIZE);
513 
514 	if (unlikely(len >= (PAGE_SIZE - 1))) {
515 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
516 				"6314 Catching potential buffer "
517 				"overflow > PAGE_SIZE = %lu bytes\n",
518 				PAGE_SIZE);
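		/*
		 * Overwrite the tail of the buffer with LPFC_NVME_INFO_MORE_STR
		 * so a truncated read is still recognizable.
		 */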
519 		strlcpy(buf + PAGE_SIZE - 1 -
520 			strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
521 			LPFC_NVME_INFO_MORE_STR,
522 			strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
523 			+ 1);
524 	}
525 
526 	return len;
527 }
528 
529 static ssize_t
530 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
531 		    char *buf)
532 {
533 	struct Scsi_Host *shost = class_to_shost(dev);
534 	struct lpfc_vport *vport = shost_priv(shost);
535 	struct lpfc_hba *phba = vport->phba;
536 	int len;
537 	struct lpfc_fc4_ctrl_stat *cstat;
538 	u64 data1, data2, data3;
539 	u64 tot, totin, totout;
540 	int i;
541 	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
542 
543 	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
544 	    (phba->sli_rev != LPFC_SLI_REV4))
545 		return 0;
546 
547 	scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
548 
549 	totin = 0;
550 	totout = 0;
551 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
552 		cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
553 		tot = cstat->io_cmpls;
554 		totin += tot;
555 		data1 = cstat->input_requests;
556 		data2 = cstat->output_requests;
557 		data3 = cstat->control_requests;
558 		totout += (data1 + data2 + data3);
559 
560 		scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
561 			  "IO %016llx ", i, data1, data2, data3);
562 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
563 			goto buffer_done;
564 
565 		scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
566 			  tot, ((data1 + data2 + data3) - tot));
567 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
568 			goto buffer_done;
569 	}
570 	scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
571 		  "OutIO %016llx\n", totin, totout, totout - totin);
572 	strlcat(buf, tmp, PAGE_SIZE);
573 
574 buffer_done:
575 	len = strnlen(buf, PAGE_SIZE);
576 
577 	return len;
578 }
579 
580 static ssize_t
581 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
582 		  char *buf)
583 {
584 	struct Scsi_Host *shost = class_to_shost(dev);
585 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
586 	struct lpfc_hba   *phba = vport->phba;
587 
588 	if (phba->cfg_enable_bg) {
589 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
590 			return scnprintf(buf, PAGE_SIZE,
591 					"BlockGuard Enabled\n");
592 		else
593 			return scnprintf(buf, PAGE_SIZE,
594 					"BlockGuard Not Supported\n");
595 	} else
596 		return scnprintf(buf, PAGE_SIZE,
597 					"BlockGuard Disabled\n");
598 }
599 
600 static ssize_t
601 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
602 		       char *buf)
603 {
604 	struct Scsi_Host *shost = class_to_shost(dev);
605 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
606 	struct lpfc_hba   *phba = vport->phba;
607 
608 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
609 			(unsigned long long)phba->bg_guard_err_cnt);
610 }
611 
612 static ssize_t
613 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
614 			char *buf)
615 {
616 	struct Scsi_Host *shost = class_to_shost(dev);
617 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
618 	struct lpfc_hba   *phba = vport->phba;
619 
620 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
621 			(unsigned long long)phba->bg_apptag_err_cnt);
622 }
623 
624 static ssize_t
625 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
626 			char *buf)
627 {
628 	struct Scsi_Host *shost = class_to_shost(dev);
629 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
630 	struct lpfc_hba   *phba = vport->phba;
631 
632 	return scnprintf(buf, PAGE_SIZE, "%llu\n",
633 			(unsigned long long)phba->bg_reftag_err_cnt);
634 }
635 
636 /**
637  * lpfc_info_show - Return some pci info about the host in ascii
638  * @dev: class converted to a Scsi_host structure.
639  * @attr: device attribute, not used.
640  * @buf: on return contains the formatted text from lpfc_info().
641  *
642  * Returns: size of formatted string.
643  **/
644 static ssize_t
645 lpfc_info_show(struct device *dev, struct device_attribute *attr,
646 	       char *buf)
647 {
648 	struct Scsi_Host *host = class_to_shost(dev);
649 
650 	return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
651 }
652 
653 /**
654  * lpfc_serialnum_show - Return the hba serial number in ascii
655  * @dev: class converted to a Scsi_host structure.
656  * @attr: device attribute, not used.
657  * @buf: on return contains the formatted text serial number.
658  *
659  * Returns: size of formatted string.
660  **/
661 static ssize_t
662 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
663 		    char *buf)
664 {
665 	struct Scsi_Host  *shost = class_to_shost(dev);
666 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
667 	struct lpfc_hba   *phba = vport->phba;
668 
669 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
670 }
671 
672 /**
673  * lpfc_temp_sensor_show - Return the temperature sensor level
674  * @dev: class converted to a Scsi_host structure.
675  * @attr: device attribute, not used.
676  * @buf: on return contains the formatted support level.
677  *
678  * Description:
679  * Returns a number indicating the temperature sensor level currently
680  * supported, zero or one in ascii.
681  *
682  * Returns: size of formatted string.
683  **/
684 static ssize_t
685 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
686 		      char *buf)
687 {
688 	struct Scsi_Host *shost = class_to_shost(dev);
689 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
690 	struct lpfc_hba   *phba = vport->phba;
691 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
692 }
693 
694 /**
695  * lpfc_modeldesc_show - Return the model description of the hba
696  * @dev: class converted to a Scsi_host structure.
697  * @attr: device attribute, not used.
698  * @buf: on return contains the scsi vpd model description.
699  *
700  * Returns: size of formatted string.
701  **/
702 static ssize_t
703 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
704 		    char *buf)
705 {
706 	struct Scsi_Host  *shost = class_to_shost(dev);
707 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
708 	struct lpfc_hba   *phba = vport->phba;
709 
710 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
711 }
712 
713 /**
714  * lpfc_modelname_show - Return the model name of the hba
715  * @dev: class converted to a Scsi_host structure.
716  * @attr: device attribute, not used.
717  * @buf: on return contains the scsi vpd model name.
718  *
719  * Returns: size of formatted string.
720  **/
721 static ssize_t
722 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
723 		    char *buf)
724 {
725 	struct Scsi_Host  *shost = class_to_shost(dev);
726 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
727 	struct lpfc_hba   *phba = vport->phba;
728 
729 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
730 }
731 
732 /**
733  * lpfc_programtype_show - Return the program type of the hba
734  * @dev: class converted to a Scsi_host structure.
735  * @attr: device attribute, not used.
736  * @buf: on return contains the scsi vpd program type.
737  *
738  * Returns: size of formatted string.
739  **/
740 static ssize_t
741 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
742 		      char *buf)
743 {
744 	struct Scsi_Host  *shost = class_to_shost(dev);
745 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
746 	struct lpfc_hba   *phba = vport->phba;
747 
748 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
749 }
750 
751 /**
752  * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
753  * @dev: class converted to a Scsi_host structure.
754  * @attr: device attribute, not used.
755  * @buf: on return contains the Menlo Maintenance sli flag.
756  *
757  * Returns: size of formatted string.
758  **/
759 static ssize_t
760 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
761 {
762 	struct Scsi_Host  *shost = class_to_shost(dev);
763 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
764 	struct lpfc_hba   *phba = vport->phba;
765 
766 	return scnprintf(buf, PAGE_SIZE, "%d\n",
767 		(phba->sli.sli_flag & LPFC_MENLO_MAINT));
768 }
769 
770 /**
771  * lpfc_vportnum_show - Return the port number in ascii of the hba
772  * @dev: class converted to a Scsi_host structure.
773  * @attr: device attribute, not used.
774  * @buf: on return contains scsi vpd program type.
775  *
776  * Returns: size of formatted string.
777  **/
778 static ssize_t
779 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
780 		   char *buf)
781 {
782 	struct Scsi_Host  *shost = class_to_shost(dev);
783 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
784 	struct lpfc_hba   *phba = vport->phba;
785 
786 	return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
787 }
788 
789 /**
790  * lpfc_fwrev_show - Return the firmware rev running in the hba
791  * @dev: class converted to a Scsi_host structure.
792  * @attr: device attribute, not used.
793  * @buf: on return contains the scsi vpd program type.
794  *
795  * Returns: size of formatted string.
796  **/
797 static ssize_t
798 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
799 		char *buf)
800 {
801 	struct Scsi_Host  *shost = class_to_shost(dev);
802 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
803 	struct lpfc_hba   *phba = vport->phba;
804 	uint32_t if_type;
805 	uint8_t sli_family;
806 	char fwrev[FW_REV_STR_SIZE];
807 	int len;
808 
809 	lpfc_decode_firmware_rev(phba, fwrev, 1);
810 	if_type = phba->sli4_hba.pc_sli4_params.if_type;
811 	sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
812 
813 	if (phba->sli_rev < LPFC_SLI_REV4)
814 		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
815 			       fwrev, phba->sli_rev);
816 	else
817 		len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
818 			       fwrev, phba->sli_rev, if_type, sli_family);
819 
820 	return len;
821 }
822 
823 /**
824  * lpfc_hdw_show - Return the jedec information about the hba
825  * @dev: class converted to a Scsi_host structure.
826  * @attr: device attribute, not used.
827  * @buf: on return contains the scsi vpd program type.
828  *
829  * Returns: size of formatted string.
830  **/
831 static ssize_t
832 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
833 {
834 	char hdw[9];
835 	struct Scsi_Host  *shost = class_to_shost(dev);
836 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
837 	struct lpfc_hba   *phba = vport->phba;
838 	lpfc_vpd_t *vp = &phba->vpd;
839 
840 	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
841 	return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
842 			 vp->rev.smRev, vp->rev.smFwRev);
843 }
844 
845 /**
846  * lpfc_option_rom_version_show - Return the adapter ROM FCode version
847  * @dev: class converted to a Scsi_host structure.
848  * @attr: device attribute, not used.
849  * @buf: on return contains the ROM and FCode ascii strings.
850  *
851  * Returns: size of formatted string.
852  **/
853 static ssize_t
854 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
855 			     char *buf)
856 {
857 	struct Scsi_Host  *shost = class_to_shost(dev);
858 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
859 	struct lpfc_hba   *phba = vport->phba;
860 	char fwrev[FW_REV_STR_SIZE];
861 
862 	if (phba->sli_rev < LPFC_SLI_REV4)
863 		return scnprintf(buf, PAGE_SIZE, "%s\n",
864 				phba->OptionROMVersion);
865 
866 	lpfc_decode_firmware_rev(phba, fwrev, 1);
867 	return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
868 }
869 
870 /**
871  * lpfc_link_state_show - Return the link state of the port
872  * @dev: class converted to a Scsi_host structure.
873  * @attr: device attribute, not used.
874  * @buf: on return contains text describing the state of the link.
875  *
876  * Notes:
877  * The switch statement has no default so zero will be returned.
878  *
879  * Returns: size of formatted string.
880  **/
881 static ssize_t
882 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
883 		     char *buf)
884 {
885 	struct Scsi_Host  *shost = class_to_shost(dev);
886 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
887 	struct lpfc_hba   *phba = vport->phba;
888 	int  len = 0;
889 
890 	switch (phba->link_state) {
891 	case LPFC_LINK_UNKNOWN:
892 	case LPFC_WARM_START:
893 	case LPFC_INIT_START:
894 	case LPFC_INIT_MBX_CMDS:
895 	case LPFC_LINK_DOWN:
896 	case LPFC_HBA_ERROR:
897 		if (phba->hba_flag & LINK_DISABLED)
898 			len += scnprintf(buf + len, PAGE_SIZE-len,
899 				"Link Down - User disabled\n");
900 		else
901 			len += scnprintf(buf + len, PAGE_SIZE-len,
902 				"Link Down\n");
903 		break;
904 	case LPFC_LINK_UP:
905 	case LPFC_CLEAR_LA:
906 	case LPFC_HBA_READY:
907 		len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
908 
909 		switch (vport->port_state) {
910 		case LPFC_LOCAL_CFG_LINK:
911 			len += scnprintf(buf + len, PAGE_SIZE-len,
912 					"Configuring Link\n");
913 			break;
914 		case LPFC_FDISC:
915 		case LPFC_FLOGI:
916 		case LPFC_FABRIC_CFG_LINK:
917 		case LPFC_NS_REG:
918 		case LPFC_NS_QRY:
919 		case LPFC_BUILD_DISC_LIST:
920 		case LPFC_DISC_AUTH:
921 			len += scnprintf(buf + len, PAGE_SIZE - len,
922 					"Discovery\n");
923 			break;
924 		case LPFC_VPORT_READY:
925 			len += scnprintf(buf + len, PAGE_SIZE - len,
926 					"Ready\n");
927 			break;
928 
929 		case LPFC_VPORT_FAILED:
930 			len += scnprintf(buf + len, PAGE_SIZE - len,
931 					"Failed\n");
932 			break;
933 
934 		case LPFC_VPORT_UNKNOWN:
935 			len += scnprintf(buf + len, PAGE_SIZE - len,
936 					"Unknown\n");
937 			break;
938 		}
939 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
940 			len += scnprintf(buf + len, PAGE_SIZE-len,
941 					"   Menlo Maint Mode\n");
942 		else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
943 			if (vport->fc_flag & FC_PUBLIC_LOOP)
944 				len += scnprintf(buf + len, PAGE_SIZE-len,
945 						"   Public Loop\n");
946 			else
947 				len += scnprintf(buf + len, PAGE_SIZE-len,
948 						"   Private Loop\n");
949 		} else {
950 			if (vport->fc_flag & FC_FABRIC)
951 				len += scnprintf(buf + len, PAGE_SIZE-len,
952 						"   Fabric\n");
953 			else
954 				len += scnprintf(buf + len, PAGE_SIZE-len,
955 						"   Point-2-Point\n");
956 		}
957 	}
958 
959 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
960 	    ((bf_get(lpfc_sli_intf_if_type,
961 	     &phba->sli4_hba.sli_intf) ==
962 	     LPFC_SLI_INTF_IF_TYPE_6))) {
963 		struct lpfc_trunk_link link = phba->trunk_link;
964 
965 		if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
966 			len += scnprintf(buf + len, PAGE_SIZE - len,
967 				"Trunk port 0: Link %s %s\n",
968 				(link.link0.state == LPFC_LINK_UP) ?
969 				 "Up" : "Down. ",
970 				trunk_errmsg[link.link0.fault]);
971 
972 		if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
973 			len += scnprintf(buf + len, PAGE_SIZE - len,
974 				"Trunk port 1: Link %s %s\n",
975 				(link.link1.state == LPFC_LINK_UP) ?
976 				 "Up" : "Down. ",
977 				trunk_errmsg[link.link1.fault]);
978 
979 		if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
980 			len += scnprintf(buf + len, PAGE_SIZE - len,
981 				"Trunk port 2: Link %s %s\n",
982 				(link.link2.state == LPFC_LINK_UP) ?
983 				 "Up" : "Down. ",
984 				trunk_errmsg[link.link2.fault]);
985 
986 		if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
987 			len += scnprintf(buf + len, PAGE_SIZE - len,
988 				"Trunk port 3: Link %s %s\n",
989 				(link.link3.state == LPFC_LINK_UP) ?
990 				 "Up" : "Down. ",
991 				trunk_errmsg[link.link3.fault]);
992 
993 	}
994 
995 	return len;
996 }
997 
998 /**
999  * lpfc_sli4_protocol_show - Return the link protocol of the HBA
1000  * @dev: class unused variable.
1001  * @attr: device attribute, not used.
1002  * @buf: on return contains the protocol type ("fc", "fcoe", or "unknown").
1003  *
1004  * Returns: size of formatted string.
1005  **/
1006 static ssize_t
1007 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1008 			char *buf)
1009 {
1010 	struct Scsi_Host *shost = class_to_shost(dev);
1011 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1012 	struct lpfc_hba *phba = vport->phba;
1013 
1014 	if (phba->sli_rev < LPFC_SLI_REV4)
1015 		return scnprintf(buf, PAGE_SIZE, "fc\n");
1016 
1017 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1018 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1019 			return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1020 		if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1021 			return scnprintf(buf, PAGE_SIZE, "fc\n");
1022 	}
1023 	return scnprintf(buf, PAGE_SIZE, "unknown\n");
1024 }
1025 
1026 /**
1027  * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1028  *			    (OAS) is supported.
1029  * @dev: class unused variable.
1030  * @attr: device attribute, not used.
1031  * @buf: on return contains "1" if OAS is supported, "0" otherwise.
1032  *
1033  * Returns: size of formatted string.
1034  **/
1035 static ssize_t
1036 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1037 			char *buf)
1038 {
1039 	struct Scsi_Host *shost = class_to_shost(dev);
1040 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1041 	struct lpfc_hba *phba = vport->phba;
1042 
1043 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1044 			phba->sli4_hba.pc_sli4_params.oas_supported);
1045 }
1046 
1047 /**
1048  * lpfc_link_state_store - Transition the link_state on an HBA port
1049  * @dev: class device that is converted into a Scsi_host.
1050  * @attr: device attribute, not used.
1051  * @buf: containing the string "up" or "down".
1052  * @count: not used.
1053  *
1054  * Returns:
1055  * -EINVAL if the buffer is not "up" or "down"
1056  * return from link state change function if non-zero
1057  * length of the buf on success
1058  **/
1059 static ssize_t
1060 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1061 		const char *buf, size_t count)
1062 {
1063 	struct Scsi_Host  *shost = class_to_shost(dev);
1064 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1065 	struct lpfc_hba   *phba = vport->phba;
1066 
1067 	int status = -EINVAL;
1068 
1069 	if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1070 			(phba->link_state == LPFC_LINK_DOWN))
1071 		status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1072 	else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1073 			(phba->link_state >= LPFC_LINK_UP))
1074 		status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1075 
1076 	if (status == 0)
1077 		return strlen(buf);
1078 	else
1079 		return status;
1080 }
1081 
1082 /**
1083  * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
1084  * @dev: class device that is converted into a Scsi_host.
1085  * @attr: device attribute, not used.
1086  * @buf: on return contains the sum of fc mapped and unmapped.
1087  *
1088  * Description:
1089  * Returns the ascii text number of the sum of the fc mapped and unmapped
1090  * vport counts.
1091  *
1092  * Returns: size of formatted string.
1093  **/
1094 static ssize_t
1095 lpfc_num_discovered_ports_show(struct device *dev,
1096 			       struct device_attribute *attr, char *buf)
1097 {
1098 	struct Scsi_Host  *shost = class_to_shost(dev);
1099 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1100 
1101 	return scnprintf(buf, PAGE_SIZE, "%d\n",
1102 			vport->fc_map_cnt + vport->fc_unmap_cnt);
1103 }
1104 
1105 /**
1106  * lpfc_issue_lip - Misnomer, name carried over from long ago
1107  * @shost: Scsi_Host pointer.
1108  *
1109  * Description:
1110  * Bring the link down gracefully then re-init the link. The firmware will
1111  * re-init the Fibre Channel interface as required. Does not issue a LIP.
1112  *
1113  * Returns:
1114  * -EPERM port offline or management commands are being blocked
1115  * -ENOMEM cannot allocate memory for the mailbox command
1116  * -EIO error sending the mailbox command
1117  * zero for success
1118  **/
1119 static int
1120 lpfc_issue_lip(struct Scsi_Host *shost)
1121 {
1122 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1123 	struct lpfc_hba   *phba = vport->phba;
1124 	LPFC_MBOXQ_t *pmboxq;
1125 	int mbxstatus = MBXERR_ERROR;
1126 
1127 	/*
1128 	 * If the link is offline, disabled or BLOCK_MGMT_IO
1129 	 * it doesn't make any sense to allow issue_lip
1130 	 */
1131 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1132 	    (phba->hba_flag & LINK_DISABLED) ||
1133 	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1134 		return -EPERM;
1135 
1136 	pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1137 
1138 	if (!pmboxq)
1139 		return -ENOMEM;
1140 
1141 	memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1142 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1143 	pmboxq->u.mb.mbxOwner = OWN_HOST;
1144 
1145 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1146 
1147 	if ((mbxstatus == MBX_SUCCESS) &&
1148 	    (pmboxq->u.mb.mbxStatus == 0 ||
1149 	     pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1150 		memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1151 		lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1152 			       phba->cfg_link_speed);
1153 		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1154 						     phba->fc_ratov * 2);
1155 		if ((mbxstatus == MBX_SUCCESS) &&
1156 		    (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1157 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1158 					"2859 SLI authentication is required "
1159 					"for INIT_LINK but has not done yet\n");
1160 	}
1161 
1162 	lpfc_set_loopback_flag(phba);
1163 	if (mbxstatus != MBX_TIMEOUT)
1164 		mempool_free(pmboxq, phba->mbox_mem_pool);
1165 
1166 	if (mbxstatus == MBXERR_ERROR)
1167 		return -EIO;
1168 
1169 	return 0;
1170 }
1171 
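/*
 * Poll @q until it drains. Returns 1 when the list empties, or 0 if entries
 * remain after roughly 5 seconds (250 polls at 20 ms each).
 */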
1172 int
1173 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1174 {
1175 	int cnt = 0;
1176 
1177 	spin_lock_irq(lock);
1178 	while (!list_empty(q)) {
1179 		spin_unlock_irq(lock);
1180 		msleep(20);
1181 		if (cnt++ > 250) {  /* 5 secs */
1182 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1183 					"0466 %s %s\n",
1184 					"Outstanding IO when ",
1185 					"bringing Adapter offline\n");
1186 				return 0;
1187 		}
1188 		spin_lock_irq(lock);
1189 	}
1190 	spin_unlock_irq(lock);
1191 	return 1;
1192 }
1193 
1194 /**
1195  * lpfc_do_offline - Issues a mailbox command to bring the link down
1196  * @phba: lpfc_hba pointer.
1197  * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1198  *
1199  * Notes:
1200  * Assumes any error from lpfc_do_offline() will be negative.
1201  * Can wait up to 5 seconds for the port ring buffers count
1202  * to reach zero, prints a warning if it is not zero and continues.
1203  * lpfc_workq_post_event() returns a non-zero return code if call fails.
1204  *
1205  * Returns:
1206  * -EIO error posting the event
1207  * zero for success
1208  **/
1209 static int
1210 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1211 {
1212 	struct completion online_compl;
1213 	struct lpfc_queue *qp = NULL;
1214 	struct lpfc_sli_ring *pring;
1215 	struct lpfc_sli *psli;
1216 	int status = 0;
1217 	int i;
1218 	int rc;
1219 
1220 	init_completion(&online_compl);
1221 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
1222 			      LPFC_EVT_OFFLINE_PREP);
1223 	if (rc == 0)
1224 		return -ENOMEM;
1225 
1226 	wait_for_completion(&online_compl);
1227 
1228 	if (status != 0)
1229 		return -EIO;
1230 
1231 	psli = &phba->sli;
1232 
1233 	/*
1234 	 * If freeing the queues have already started, don't access them.
1235 	 * Otherwise set FREE_WAIT to indicate that queues are being used
1236 	 * to hold the freeing process until we finish.
1237 	 */
1238 	spin_lock_irq(&phba->hbalock);
1239 	if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1240 		psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1241 	} else {
1242 		spin_unlock_irq(&phba->hbalock);
1243 		goto skip_wait;
1244 	}
1245 	spin_unlock_irq(&phba->hbalock);
1246 
1247 	/* Wait a little for things to settle down, but not
1248 	 * long enough for dev loss timeout to expire.
1249 	 */
1250 	if (phba->sli_rev != LPFC_SLI_REV4) {
1251 		for (i = 0; i < psli->num_rings; i++) {
1252 			pring = &psli->sli3_ring[i];
1253 			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1254 					      &phba->hbalock))
1255 				goto out;
1256 		}
1257 	} else {
1258 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1259 			pring = qp->pring;
1260 			if (!pring)
1261 				continue;
1262 			if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1263 					      &pring->ring_lock))
1264 				goto out;
1265 		}
1266 	}
1267 out:
1268 	spin_lock_irq(&phba->hbalock);
1269 	psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1270 	spin_unlock_irq(&phba->hbalock);
1271 
1272 skip_wait:
1273 	init_completion(&online_compl);
1274 	rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1275 	if (rc == 0)
1276 		return -ENOMEM;
1277 
1278 	wait_for_completion(&online_compl);
1279 
1280 	if (status != 0)
1281 		return -EIO;
1282 
1283 	return 0;
1284 }
1285 
1286 /**
1287  * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1288  * @phba: lpfc_hba pointer.
1289  *
1290  * Description:
1291  * Issues a PCI secondary bus reset for the phba->pcidev.
1292  *
1293  * Notes:
1294  * First walks the bus_list to ensure only PCI devices with Emulex
1295  * vendor id, device ids that support hot reset, only one occurrence
1296  * of function 0, and all ports on the bus are in offline mode to ensure the
1297  * hot reset only affects one valid HBA.
1298  *
1299  * Returns:
1300  * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1301  * -ENODEV,   NULL ptr to pcidev
1302  * -EBADSLT,  detected invalid device
1303  * -EBUSY,    port is not in offline state
1304  *      0,    successful
1305  */
1306 static int
1307 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1308 {
1309 	struct pci_dev *pdev = phba->pcidev;
1310 	struct Scsi_Host *shost = NULL;
1311 	struct lpfc_hba *phba_other = NULL;
1312 	struct pci_dev *ptr = NULL;
1313 	int res;
1314 
1315 	if (phba->cfg_enable_hba_reset != 2)
1316 		return -ENOTSUPP;
1317 
1318 	if (!pdev) {
1319 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1320 		return -ENODEV;
1321 	}
1322 
1323 	res = lpfc_check_pci_resettable(phba);
1324 	if (res)
1325 		return res;
1326 
1327 	/* Walk the list of devices on the pci_dev's bus */
1328 	list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1329 		/* Check port is offline */
1330 		shost = pci_get_drvdata(ptr);
1331 		if (shost) {
1332 			phba_other =
1333 				((struct lpfc_vport *)shost->hostdata)->phba;
1334 			if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1335 				lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1336 						"8349 WWPN = 0x%02x%02x%02x%02x"
1337 						"%02x%02x%02x%02x is not "
1338 						"offline!\n",
1339 						phba_other->wwpn[0],
1340 						phba_other->wwpn[1],
1341 						phba_other->wwpn[2],
1342 						phba_other->wwpn[3],
1343 						phba_other->wwpn[4],
1344 						phba_other->wwpn[5],
1345 						phba_other->wwpn[6],
1346 						phba_other->wwpn[7]);
1347 				return -EBUSY;
1348 			}
1349 		}
1350 	}
1351 
1352 	/* Issue PCI bus reset */
1353 	res = pci_reset_bus(pdev);
1354 	if (res) {
1355 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1356 				"8350 PCI reset bus failed: %d\n", res);
1357 	}
1358 
1359 	return res;
1360 }
1361 
1362 /**
1363  * lpfc_selective_reset - Offline then onlines the port
1364  * @phba: lpfc_hba pointer.
1365  *
1366  * Description:
1367  * If the port is configured to allow a reset then the hba is brought
1368  * offline then online.
1369  *
1370  * Notes:
1371  * Assumes any error from lpfc_do_offline() will be negative.
1372  * Do not make this function static.
1373  *
1374  * Returns:
1375  * lpfc_do_offline() return code if not zero
1376  * -EIO reset not configured or error posting the event
1377  * zero for success
1378  **/
1379 int
1380 lpfc_selective_reset(struct lpfc_hba *phba)
1381 {
1382 	struct completion online_compl;
1383 	int status = 0;
1384 	int rc;
1385 
1386 	if (!phba->cfg_enable_hba_reset)
1387 		return -EACCES;
1388 
1389 	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1390 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1391 
1392 		if (status != 0)
1393 			return status;
1394 	}
1395 
1396 	init_completion(&online_compl);
1397 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
1398 			      LPFC_EVT_ONLINE);
1399 	if (rc == 0)
1400 		return -ENOMEM;
1401 
1402 	wait_for_completion(&online_compl);
1403 
1404 	if (status != 0)
1405 		return -EIO;
1406 
1407 	return 0;
1408 }
1409 
1410 /**
1411  * lpfc_issue_reset - Selectively resets an adapter
1412  * @dev: class device that is converted into a Scsi_host.
1413  * @attr: device attribute, not used.
1414  * @buf: containing the string "selective".
1415  * @count: unused variable.
1416  *
1417  * Description:
1418  * If the buf contains the string "selective" then lpfc_selective_reset()
1419  * is called to perform the reset.
1420  *
1421  * Notes:
1422  * Assumes any error from lpfc_selective_reset() will be negative.
1423  * If lpfc_selective_reset() returns zero then the length of the buffer
1424  * is returned which indicates success
1425  *
1426  * Returns:
1427  * -EINVAL if the buffer does not contain the string "selective"
1428  * length of buf if the call to lpfc_selective_reset() succeeds
1429  * return value of lpfc_selective_reset() if the call fails
1430 **/
1431 static ssize_t
1432 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1433 		 const char *buf, size_t count)
1434 {
1435 	struct Scsi_Host  *shost = class_to_shost(dev);
1436 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1437 	struct lpfc_hba   *phba = vport->phba;
1438 	int status = -EINVAL;
1439 
1440 	if (!phba->cfg_enable_hba_reset)
1441 		return -EACCES;
1442 
1443 	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1444 		status = phba->lpfc_selective_reset(phba);
1445 
1446 	if (status == 0)
1447 		return strlen(buf);
1448 	else
1449 		return status;
1450 }
1451 
1452 /**
1453  * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readiness
1454  * @phba: lpfc_hba pointer.
1455  *
1456  * Description:
1457  * SLI4 interface type-2 device to wait on the sliport status register for
1458  * the readiness after performing a firmware reset.
1459  *
1460  * Returns:
1461  * zero for success, -EPERM when port does not have privilege to perform the
1462  * reset, -EIO when port timeout from recovering from the reset.
1463  *
1464  * Note:
1465  * As the caller will interpret the return code by value, be careful in making
1466  * change or addition to return codes.
1467  **/
1468 int
1469 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1470 {
1471 	struct lpfc_register portstat_reg = {0};
1472 	int i;
1473 
1474 	msleep(100);
1475 	if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1476 		       &portstat_reg.word0))
1477 		return -EIO;
1478 
1479 	/* verify if privileged for the request operation */
1480 	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1481 	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
1482 		return -EPERM;
1483 
1484 	/* wait for the SLI port firmware ready after firmware reset */
1485 	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1486 		msleep(10);
1487 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1488 			       &portstat_reg.word0))
1489 			continue;
1490 		if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1491 			continue;
1492 		if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1493 			continue;
1494 		if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1495 			continue;
1496 		break;
1497 	}
1498 
1499 	if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1500 		return 0;
1501 	else
1502 		return -EIO;
1503 }
1504 
1505 /**
1506  * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register access
1507  * @phba: lpfc_hba pointer.
1508  *
1509  * Description:
1510  * Request SLI4 interface type-2 device to perform a physical register set
1511  * access.
1512  *
1513  * Returns:
1514  * zero for success
1515  **/
1516 static ssize_t
1517 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1518 {
1519 	struct completion online_compl;
1520 	struct pci_dev *pdev = phba->pcidev;
1521 	uint32_t before_fc_flag;
1522 	uint32_t sriov_nr_virtfn;
1523 	uint32_t reg_val;
1524 	int status = 0, rc = 0;
1525 	int job_posted = 1, sriov_err;
1526 
1527 	if (!phba->cfg_enable_hba_reset)
1528 		return -EACCES;
1529 
1530 	if ((phba->sli_rev < LPFC_SLI_REV4) ||
1531 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1532 	     LPFC_SLI_INTF_IF_TYPE_2))
1533 		return -EPERM;
1534 
1535 	/* Keep state if we need to restore back */
1536 	before_fc_flag = phba->pport->fc_flag;
1537 	sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1538 
1539 	if (opcode == LPFC_FW_DUMP) {
1540 		init_completion(&online_compl);
1541 		phba->fw_dump_cmpl = &online_compl;
1542 	} else {
1543 		/* Disable SR-IOV virtual functions if enabled */
1544 		if (phba->cfg_sriov_nr_virtfn) {
1545 			pci_disable_sriov(pdev);
1546 			phba->cfg_sriov_nr_virtfn = 0;
1547 		}
1548 
1549 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1550 
1551 		if (status != 0)
1552 			return status;
1553 
1554 		/* wait for the device to be quiesced before firmware reset */
1555 		msleep(100);
1556 	}
1557 
1558 	reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1559 			LPFC_CTL_PDEV_CTL_OFFSET);
1560 
1561 	if (opcode == LPFC_FW_DUMP)
1562 		reg_val |= LPFC_FW_DUMP_REQUEST;
1563 	else if (opcode == LPFC_FW_RESET)
1564 		reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1565 	else if (opcode == LPFC_DV_RESET)
1566 		reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1567 
1568 	writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1569 	       LPFC_CTL_PDEV_CTL_OFFSET);
1570 	/* flush */
1571 	readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1572 
1573 	/* delay driver action following IF_TYPE_2 reset */
1574 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
1575 
1576 	if (rc == -EPERM) {
1577 		/* no privilege for reset */
1578 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1579 				"3150 No privilege to perform the requested "
1580 				"access: x%x\n", reg_val);
1581 	} else if (rc == -EIO) {
1582 		/* reset failed, there is nothing more we can do */
1583 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1584 				"3153 Fail to perform the requested "
1585 				"access: x%x\n", reg_val);
1586 		if (phba->fw_dump_cmpl)
1587 			phba->fw_dump_cmpl = NULL;
1588 		return rc;
1589 	}
1590 
1591 	/* keep the original port state */
1592 	if (before_fc_flag & FC_OFFLINE_MODE) {
1593 		if (phba->fw_dump_cmpl)
1594 			phba->fw_dump_cmpl = NULL;
1595 		goto out;
1596 	}
1597 
1598 	/* Firmware dump will trigger an HA_ERATT event, and
1599 	 * lpfc_handle_eratt_s4 routine already handles bringing the port back
1600 	 * online.
1601 	 */
1602 	if (opcode == LPFC_FW_DUMP) {
1603 		wait_for_completion(phba->fw_dump_cmpl);
1604 	} else  {
1605 		init_completion(&online_compl);
1606 		job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1607 						   LPFC_EVT_ONLINE);
1608 		if (!job_posted)
1609 			goto out;
1610 
1611 		wait_for_completion(&online_compl);
1612 	}
1613 out:
1614 	/* in any case, restore the virtual functions enabled as before */
1615 	if (sriov_nr_virtfn) {
1616 		/* If fw_dump was performed, first disable to clean up */
1617 		if (opcode == LPFC_FW_DUMP) {
1618 			pci_disable_sriov(pdev);
1619 			phba->cfg_sriov_nr_virtfn = 0;
1620 		}
1621 
1622 		sriov_err =
1623 			lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1624 		if (!sriov_err)
1625 			phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1626 	}
1627 
1628 	/* return proper error code */
1629 	if (!rc) {
1630 		if (!job_posted)
1631 			rc = -ENOMEM;
1632 		else if (status)
1633 			rc = -EIO;
1634 	}
1635 	return rc;
1636 }
1637 
1638 /**
1639  * lpfc_nport_evt_cnt_show - Return the number of nport events
1640  * @dev: class device that is converted into a Scsi_host.
1641  * @attr: device attribute, not used.
1642  * @buf: on return contains the ascii number of nport events.
1643  *
1644  * Returns: size of formatted string.
1645  **/
1646 static ssize_t
1647 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1648 			char *buf)
1649 {
1650 	struct Scsi_Host  *shost = class_to_shost(dev);
1651 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1652 	struct lpfc_hba   *phba = vport->phba;
1653 
1654 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1655 }
1656 
1657 static int
1658 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1659 {
1660 	LPFC_MBOXQ_t *mbox = NULL;
1661 	unsigned long val = 0;
1662 	char *pval = NULL;
1663 	int rc = 0;
1664 
1665 	if (!strncmp("enable", buff_out,
1666 				 strlen("enable"))) {
1667 		pval = buff_out + strlen("enable") + 1;
1668 		rc = kstrtoul(pval, 0, &val);
1669 		if (rc)
1670 			return rc; /* Invalid  number */
1671 	} else if (!strncmp("disable", buff_out,
1672 				 strlen("disable"))) {
1673 		val = 0;
1674 	} else {
1675 		return -EINVAL;  /* Invalid command */
1676 	}
1677 
1678 	switch (val) {
1679 	case 0:
1680 		val = 0x0; /* Disable */
1681 		break;
1682 	case 2:
1683 		val = 0x1; /* Enable two port trunk */
1684 		break;
1685 	case 4:
1686 		val = 0x2; /* Enable four port trunk */
1687 		break;
1688 	default:
1689 		return -EINVAL;
1690 	}
1691 
1692 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1693 			"0070 Set trunk mode with val %ld ", val);
1694 
1695 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1696 	if (!mbox)
1697 		return -ENOMEM;
1698 
1699 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1700 			 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1701 			 12, LPFC_SLI4_MBX_EMBED);
1702 
1703 	bf_set(lpfc_mbx_set_trunk_mode,
1704 	       &mbox->u.mqe.un.set_trunk_mode,
1705 	       val);
1706 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1707 	if (rc)
1708 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1709 				"0071 Set trunk mode failed with status: %d",
1710 				rc);
1711 	mempool_free(mbox, phba->mbox_mem_pool);
1712 
1713 	return 0;
1714 }
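
/*
 * Illustration only, not part of the driver: lpfc_set_trunking() is reached
 * from lpfc_board_mode_store() below when the string written to the
 * board_mode sysfs attribute starts with "trunk".  The remainder of the
 * string is expected to be "enable <n>" with <n> equal to 0, 2 or 4, or
 * "disable", e.g. (hypothetical host number):
 *
 *	echo "trunk enable 2" > /sys/class/scsi_host/host0/board_mode
 */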
1715 
1716 /**
1717  * lpfc_board_mode_show - Return the state of the board
1718  * @dev: class device that is converted into a Scsi_host.
1719  * @attr: device attribute, not used.
1720  * @buf: on return contains the state of the adapter.
1721  *
1722  * Returns: size of formatted string.
1723  **/
1724 static ssize_t
1725 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1726 		     char *buf)
1727 {
1728 	struct Scsi_Host  *shost = class_to_shost(dev);
1729 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1730 	struct lpfc_hba   *phba = vport->phba;
1731 	char *state;
1732 
1733 	if (phba->link_state == LPFC_HBA_ERROR)
1734 		state = "error";
1735 	else if (phba->link_state == LPFC_WARM_START)
1736 		state = "warm start";
1737 	else if (phba->link_state == LPFC_INIT_START)
1738 		state = "offline";
1739 	else
1740 		state = "online";
1741 
1742 	return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1743 }
1744 
1745 /**
1746  * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1747  * @dev: class device that is converted into a Scsi_host.
1748  * @attr: device attribute, not used.
1749  * @buf: containing one of the strings "online", "offline", "warm" or "error".
1750  * @count: unused variable.
1751  *
1752  * Returns:
1753  * -EACCES if enable hba reset not enabled
1754  * -EINVAL if the buffer does not contain a valid string (see above)
1755  * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1756  * buf length greater than zero indicates success
1757  **/
1758 static ssize_t
1759 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1760 		      const char *buf, size_t count)
1761 {
1762 	struct Scsi_Host  *shost = class_to_shost(dev);
1763 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1764 	struct lpfc_hba   *phba = vport->phba;
1765 	struct completion online_compl;
1766 	char *board_mode_str = NULL;
1767 	int status = 0;
1768 	int rc;
1769 
1770 	if (!phba->cfg_enable_hba_reset) {
1771 		status = -EACCES;
1772 		goto board_mode_out;
1773 	}
1774 
1775 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1776 			 "3050 lpfc_board_mode set to %s\n", buf);
1777 
1778 	init_completion(&online_compl);
1779 
1780 	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1781 		rc = lpfc_workq_post_event(phba, &status, &online_compl,
1782 				      LPFC_EVT_ONLINE);
1783 		if (rc == 0) {
1784 			status = -ENOMEM;
1785 			goto board_mode_out;
1786 		}
1787 		wait_for_completion(&online_compl);
1788 		if (status)
1789 			status = -EIO;
1790 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1791 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1792 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1793 		if (phba->sli_rev == LPFC_SLI_REV4)
1794 			status = -EINVAL;
1795 		else
1796 			status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1797 	else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1798 		if (phba->sli_rev == LPFC_SLI_REV4)
1799 			status = -EINVAL;
1800 		else
1801 			status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1802 	else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1803 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1804 	else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1805 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1806 	else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1807 		status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1808 	else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1809 		 == 0)
1810 		status = lpfc_reset_pci_bus(phba);
1811 	else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1812 		status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1813 	else
1814 		status = -EINVAL;
1815 
1816 board_mode_out:
1817 	if (!status)
1818 		return strlen(buf);
1819 	else {
1820 		board_mode_str = strchr(buf, '\n');
1821 		if (board_mode_str)
1822 			*board_mode_str = '\0';
1823 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1824 				 "3097 Failed \"%s\", status(%d), "
1825 				 "fc_flag(x%x)\n",
1826 				 buf, status, phba->pport->fc_flag);
1827 		return status;
1828 	}
1829 }
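
/*
 * Illustration only, not part of the driver: the board_mode attribute is
 * driven by writing one of the plain strings handled above, e.g.
 * (hypothetical host number):
 *
 *	echo offline > /sys/class/scsi_host/host0/board_mode
 *	echo online  > /sys/class/scsi_host/host0/board_mode
 *
 * "dump", "fw_reset" and "dv_reset" map to lpfc_sli4_pdev_reg_request()
 * with the matching opcode, while "warm" and "error" are rejected on
 * SLI-4 ports.
 */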
1830 
1831 /**
1832  * lpfc_get_hba_info - Return various bits of information about the adapter
1833  * @phba: pointer to the adapter structure.
1834  * @mxri: max xri count.
1835  * @axri: available xri count.
1836  * @mrpi: max rpi count.
1837  * @arpi: available rpi count.
1838  * @mvpi: max vpi count.
1839  * @avpi: available vpi count.
1840  *
1841  * Description:
1842  * If an integer pointer for a count is not NULL, then the value for that
1843  * count is returned.
1844  *
1845  * Returns:
1846  * zero on error
1847  * one for success
1848  **/
1849 static int
1850 lpfc_get_hba_info(struct lpfc_hba *phba,
1851 		  uint32_t *mxri, uint32_t *axri,
1852 		  uint32_t *mrpi, uint32_t *arpi,
1853 		  uint32_t *mvpi, uint32_t *avpi)
1854 {
1855 	struct lpfc_mbx_read_config *rd_config;
1856 	LPFC_MBOXQ_t *pmboxq;
1857 	MAILBOX_t *pmb;
1858 	int rc = 0;
1859 	uint32_t max_vpi;
1860 
1861 	/*
1862 	 * prevent udev from issuing mailbox commands until the port is
1863 	 * configured.
1864 	 */
1865 	if (phba->link_state < LPFC_LINK_DOWN ||
1866 	    !phba->mbox_mem_pool ||
1867 	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1868 		return 0;
1869 
1870 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1871 		return 0;
1872 
1873 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1874 	if (!pmboxq)
1875 		return 0;
1876 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1877 
1878 	pmb = &pmboxq->u.mb;
1879 	pmb->mbxCommand = MBX_READ_CONFIG;
1880 	pmb->mbxOwner = OWN_HOST;
1881 	pmboxq->ctx_buf = NULL;
1882 
1883 	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1884 		rc = MBX_NOT_FINISHED;
1885 	else
1886 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1887 
1888 	if (rc != MBX_SUCCESS) {
1889 		if (rc != MBX_TIMEOUT)
1890 			mempool_free(pmboxq, phba->mbox_mem_pool);
1891 		return 0;
1892 	}
1893 
1894 	if (phba->sli_rev == LPFC_SLI_REV4) {
1895 		rd_config = &pmboxq->u.mqe.un.rd_config;
1896 		if (mrpi)
1897 			*mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1898 		if (arpi)
1899 			*arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1900 					phba->sli4_hba.max_cfg_param.rpi_used;
1901 		if (mxri)
1902 			*mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1903 		if (axri)
1904 			*axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1905 					phba->sli4_hba.max_cfg_param.xri_used;
1906 
1907 		/* Account for differences with SLI-3.  Get vpi count from
1908 		 * mailbox data and subtract one for max vpi value.
1909 		 */
1910 		max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1911 			(bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1912 
1913 		/* Limit the max we support */
1914 		if (max_vpi > LPFC_MAX_VPI)
1915 			max_vpi = LPFC_MAX_VPI;
1916 		if (mvpi)
1917 			*mvpi = max_vpi;
1918 		if (avpi)
1919 			*avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1920 	} else {
1921 		if (mrpi)
1922 			*mrpi = pmb->un.varRdConfig.max_rpi;
1923 		if (arpi)
1924 			*arpi = pmb->un.varRdConfig.avail_rpi;
1925 		if (mxri)
1926 			*mxri = pmb->un.varRdConfig.max_xri;
1927 		if (axri)
1928 			*axri = pmb->un.varRdConfig.avail_xri;
1929 		if (mvpi)
1930 			*mvpi = pmb->un.varRdConfig.max_vpi;
1931 		if (avpi) {
1932 			/* avail_vpi is only valid if link is up and ready */
1933 			if (phba->link_state == LPFC_HBA_READY)
1934 				*avpi = pmb->un.varRdConfig.avail_vpi;
1935 			else
1936 				*avpi = pmb->un.varRdConfig.max_vpi;
1937 		}
1938 	}
1939 
1940 	mempool_free(pmboxq, phba->mbox_mem_pool);
1941 	return 1;
1942 }
1943 
1944 /**
1945  * lpfc_max_rpi_show - Return maximum rpi
1946  * @dev: class device that is converted into a Scsi_host.
1947  * @attr: device attribute, not used.
1948  * @buf: on return contains the maximum rpi count in decimal or "Unknown".
1949  *
1950  * Description:
1951  * Calls lpfc_get_hba_info() asking for just the mrpi count.
1952  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1953  * to "Unknown" and the buffer length is returned, therefore the caller
1954  * must check for "Unknown" in the buffer to detect a failure.
1955  *
1956  * Returns: size of formatted string.
1957  **/
1958 static ssize_t
1959 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1960 		  char *buf)
1961 {
1962 	struct Scsi_Host  *shost = class_to_shost(dev);
1963 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1964 	struct lpfc_hba   *phba = vport->phba;
1965 	uint32_t cnt;
1966 
1967 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1968 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1969 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1970 }
1971 
1972 /**
1973  * lpfc_used_rpi_show - Return maximum rpi minus available rpi
1974  * @dev: class device that is converted into a Scsi_host.
1975  * @attr: device attribute, not used.
1976  * @buf: containing the used rpi count in decimal or "Unknown".
1977  *
1978  * Description:
1979  * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
1980  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1981  * to "Unknown" and the buffer length is returned, therefore the caller
1982  * must check for "Unknown" in the buffer to detect a failure.
1983  *
1984  * Returns: size of formatted string.
1985  **/
1986 static ssize_t
1987 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1988 		   char *buf)
1989 {
1990 	struct Scsi_Host  *shost = class_to_shost(dev);
1991 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1992 	struct lpfc_hba   *phba = vport->phba;
1993 	uint32_t cnt, acnt;
1994 
1995 	if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1996 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
1997 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1998 }
1999 
2000 /**
2001  * lpfc_max_xri_show - Return maximum xri
2002  * @dev: class device that is converted into a Scsi_host.
2003  * @attr: device attribute, not used.
2004  * @buf: on return contains the maximum xri count in decimal or "Unknown".
2005  *
2006  * Description:
2007  * Calls lpfc_get_hba_info() asking for just the mxri count.
2008  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2009  * to "Unknown" and the buffer length is returned, therefore the caller
2010  * must check for "Unknown" in the buffer to detect a failure.
2011  *
2012  * Returns: size of formatted string.
2013  **/
2014 static ssize_t
2015 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
2016 		  char *buf)
2017 {
2018 	struct Scsi_Host  *shost = class_to_shost(dev);
2019 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2020 	struct lpfc_hba   *phba = vport->phba;
2021 	uint32_t cnt;
2022 
2023 	if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2024 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2025 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2026 }
2027 
2028 /**
2029  * lpfc_used_xri_show - Return maximum xri minus the available xri
2030  * @dev: class device that is converted into a Scsi_host.
2031  * @attr: device attribute, not used.
2032  * @buf: on return contains the used xri count in decimal or "Unknown".
2033  *
2034  * Description:
2035  * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2036  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2037  * to "Unknown" and the buffer length is returned, therefore the caller
2038  * must check for "Unknown" in the buffer to detect a failure.
2039  *
2040  * Returns: size of formatted string.
2041  **/
2042 static ssize_t
2043 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2044 		   char *buf)
2045 {
2046 	struct Scsi_Host  *shost = class_to_shost(dev);
2047 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2048 	struct lpfc_hba   *phba = vport->phba;
2049 	uint32_t cnt, acnt;
2050 
2051 	if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2052 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2053 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2054 }
2055 
2056 /**
2057  * lpfc_max_vpi_show - Return maximum vpi
2058  * @dev: class device that is converted into a Scsi_host.
2059  * @attr: device attribute, not used.
2060  * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2061  *
2062  * Description:
2063  * Calls lpfc_get_hba_info() asking for just the mvpi count.
2064  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2065  * to "Unknown" and the buffer length is returned, therefore the caller
2066  * must check for "Unknown" in the buffer to detect a failure.
2067  *
2068  * Returns: size of formatted string.
2069  **/
2070 static ssize_t
2071 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2072 		  char *buf)
2073 {
2074 	struct Scsi_Host  *shost = class_to_shost(dev);
2075 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2076 	struct lpfc_hba   *phba = vport->phba;
2077 	uint32_t cnt;
2078 
2079 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2080 		return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2081 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2082 }
2083 
2084 /**
2085  * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2086  * @dev: class device that is converted into a Scsi_host.
2087  * @attr: device attribute, not used.
2088  * @buf: on return contains the used vpi count in decimal or "Unknown".
2089  *
2090  * Description:
2091  * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2092  * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2093  * to "Unknown" and the buffer length is returned, therefore the caller
2094  * must check for "Unknown" in the buffer to detect a failure.
2095  *
2096  * Returns: size of formatted string.
2097  **/
2098 static ssize_t
2099 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2100 		   char *buf)
2101 {
2102 	struct Scsi_Host  *shost = class_to_shost(dev);
2103 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2104 	struct lpfc_hba   *phba = vport->phba;
2105 	uint32_t cnt, acnt;
2106 
2107 	if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2108 		return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2109 	return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2110 }
2111 
2112 /**
2113  * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2114  * @dev: class device that is converted into a Scsi_host.
2115  * @attr: device attribute, not used.
2116  * @buf: text that must be interpreted to determine if npiv is supported.
2117  *
2118  * Description:
2119  * Buffer will contain text indicating NPIV is not supported on the port,
2120  * the port is an NPIV physical port, or it is an NPIV virtual port with
2121  * the id of the vport.
2122  *
2123  * Returns: size of formatted string.
2124  **/
2125 static ssize_t
2126 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2127 		    char *buf)
2128 {
2129 	struct Scsi_Host  *shost = class_to_shost(dev);
2130 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2131 	struct lpfc_hba   *phba = vport->phba;
2132 
2133 	if (!(phba->max_vpi))
2134 		return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2135 	if (vport->port_type == LPFC_PHYSICAL_PORT)
2136 		return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2137 	return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2138 }
2139 
2140 /**
2141  * lpfc_poll_show - Return text about poll support for the adapter
2142  * @dev: class device that is converted into a Scsi_host.
2143  * @attr: device attribute, not used.
2144  * @buf: on return contains the cfg_poll in hex.
2145  *
2146  * Notes:
2147  * cfg_poll should be a lpfc_polling_flags type.
2148  *
2149  * Returns: size of formatted string.
2150  **/
2151 static ssize_t
2152 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2153 	       char *buf)
2154 {
2155 	struct Scsi_Host  *shost = class_to_shost(dev);
2156 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2157 	struct lpfc_hba   *phba = vport->phba;
2158 
2159 	return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2160 }
2161 
2162 /**
2163  * lpfc_poll_store - Set the value of cfg_poll for the adapter
2164  * @dev: class device that is converted into a Scsi_host.
2165  * @attr: device attribute, not used.
2166  * @buf: one or more lpfc_polling_flags values.
2167  * @count: not used.
2168  *
2169  * Notes:
2170  * buf contents converted to integer and checked for a valid value.
2171  *
2172  * Returns:
2173  * -EINVAL if the buffer cannot be converted or is out of range
2174  * length of the buf on success
2175  **/
2176 static ssize_t
2177 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2178 		const char *buf, size_t count)
2179 {
2180 	struct Scsi_Host  *shost = class_to_shost(dev);
2181 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2182 	struct lpfc_hba   *phba = vport->phba;
2183 	uint32_t creg_val;
2184 	uint32_t old_val;
2185 	int val = 0;
2186 
2187 	if (!isdigit(buf[0]))
2188 		return -EINVAL;
2189 
2190 	if (sscanf(buf, "%i", &val) != 1)
2191 		return -EINVAL;
2192 
2193 	if ((val & 0x3) != val)
2194 		return -EINVAL;
2195 
2196 	if (phba->sli_rev == LPFC_SLI_REV4)
2197 		val = 0;
2198 
2199 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2200 		"3051 lpfc_poll changed from %d to %d\n",
2201 		phba->cfg_poll, val);
2202 
2203 	spin_lock_irq(&phba->hbalock);
2204 
2205 	old_val = phba->cfg_poll;
2206 
2207 	if (val & ENABLE_FCP_RING_POLLING) {
2208 		if ((val & DISABLE_FCP_RING_INT) &&
2209 		    !(old_val & DISABLE_FCP_RING_INT)) {
2210 			if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2211 				spin_unlock_irq(&phba->hbalock);
2212 				return -EINVAL;
2213 			}
2214 			creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2215 			writel(creg_val, phba->HCregaddr);
2216 			readl(phba->HCregaddr); /* flush */
2217 
2218 			lpfc_poll_start_timer(phba);
2219 		}
2220 	} else if (val != 0x0) {
2221 		spin_unlock_irq(&phba->hbalock);
2222 		return -EINVAL;
2223 	}
2224 
2225 	if (!(val & DISABLE_FCP_RING_INT) &&
2226 	    (old_val & DISABLE_FCP_RING_INT))
2227 	{
2228 		spin_unlock_irq(&phba->hbalock);
2229 		del_timer(&phba->fcp_poll_timer);
2230 		spin_lock_irq(&phba->hbalock);
2231 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2232 			spin_unlock_irq(&phba->hbalock);
2233 			return -EINVAL;
2234 		}
2235 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2236 		writel(creg_val, phba->HCregaddr);
2237 		readl(phba->HCregaddr); /* flush */
2238 	}
2239 
2240 	phba->cfg_poll = val;
2241 
2242 	spin_unlock_irq(&phba->hbalock);
2243 
2244 	return strlen(buf);
2245 }
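
/*
 * Note, summarizing the checks above rather than adding new behavior: the
 * value written to lpfc_poll is masked against 0x3.  The
 * ENABLE_FCP_RING_POLLING flag turns FCP ring polling on, and the
 * DISABLE_FCP_RING_INT flag additionally disables the FCP ring interrupt;
 * the latter is only honored together with the former, and the value is
 * forced to 0 on SLI-4 ports.
 */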
2246 
2247 /**
2248  * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2249  * @dev: class converted to a Scsi_host structure.
2250  * @attr: device attribute, not used.
2251  * @buf: on return contains the formatted support level.
2252  *
2253  * Description:
2254  * Returns the maximum number of virtual functions a physical function can
2255  * support; 0 will be returned if called on a virtual function.
2256  *
2257  * Returns: size of formatted string.
2258  **/
2259 static ssize_t
2260 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2261 			      struct device_attribute *attr,
2262 			      char *buf)
2263 {
2264 	struct Scsi_Host *shost = class_to_shost(dev);
2265 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2266 	struct lpfc_hba *phba = vport->phba;
2267 	uint16_t max_nr_virtfn;
2268 
2269 	max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2270 	return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2271 }
2272 
2273 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2274 {
2275 	return val >= min && val <= max;
2276 }
2277 
2278 /**
2279  * lpfc_enable_bbcr_set: Sets an attribute value.
2280  * @phba: pointer to the adapter structure.
2281  * @val: integer attribute value.
2282  *
2283  * Description:
2284  * Validates the value against the allowed range and sets the
2285  * adapter config field if it is in range.  Prints an error message
2286  * and does not set the parameter if the value is invalid.
2287  *
2288  * Returns:
2289  * zero on success
2290  * -EINVAL if val is invalid
2291  */
2292 static ssize_t
2293 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2294 {
2295 	if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2296 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2297 				"3068 %s_enable_bbcr changed from %d to %d\n",
2298 				LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
2299 		phba->cfg_enable_bbcr = val;
2300 		return 0;
2301 	}
2302 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2303 			"0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
2304 			LPFC_DRIVER_NAME, val);
2305 	return -EINVAL;
2306 }
2307 
2308 /**
2309  * lpfc_param_show - Return a cfg attribute value in decimal
2310  *
2311  * Description:
2312  * Macro that given an attr e.g. hba_queue_depth expands
2313  * into a function with the name lpfc_hba_queue_depth_show.
2314  *
2315  * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
2316  * @dev: class device that is converted into a Scsi_host.
2317  * @attr: device attribute, not used.
2318  * @buf: on return contains the attribute value in decimal.
2319  *
2320  * Returns: size of formatted string.
2321  **/
2322 #define lpfc_param_show(attr)	\
2323 static ssize_t \
2324 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2325 		   char *buf) \
2326 { \
2327 	struct Scsi_Host  *shost = class_to_shost(dev);\
2328 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2329 	struct lpfc_hba   *phba = vport->phba;\
2330 	return scnprintf(buf, PAGE_SIZE, "%d\n",\
2331 			phba->cfg_##attr);\
2332 }
2333 
2334 /**
2335  * lpfc_param_hex_show - Return a cfg attribute value in hex
2336  *
2337  * Description:
2338  * Macro that given an attr e.g. hba_queue_depth expands
2339  * into a function with the name lpfc_hba_queue_depth_show
2340  *
2341  * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
2342  * @dev: class device that is converted into a Scsi_host.
2343  * @attr: device attribute, not used.
2344  * @buf: on return contains the attribute value in hexadecimal.
2345  *
2346  * Returns: size of formatted string.
2347  **/
2348 #define lpfc_param_hex_show(attr)	\
2349 static ssize_t \
2350 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2351 		   char *buf) \
2352 { \
2353 	struct Scsi_Host  *shost = class_to_shost(dev);\
2354 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2355 	struct lpfc_hba   *phba = vport->phba;\
2356 	uint val = 0;\
2357 	val = phba->cfg_##attr;\
2358 	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2359 			phba->cfg_##attr);\
2360 }
2361 
2362 /**
2363  * lpfc_param_init - Initializes a cfg attribute
2364  *
2365  * Description:
2366  * Macro that given an attr e.g. hba_queue_depth expands
2367  * into a function with the name lpfc_hba_queue_depth_init. The macro also
2368  * takes a default argument, a minimum and maximum argument.
2369  *
2370  * lpfc_##attr##_init: Initializes an attribute.
2371  * @phba: pointer to the adapter structure.
2372  * @val: integer attribute value.
2373  *
2374  * Validates the min and max values then sets the adapter config field
2375  * accordingly, or uses the default if out of range and prints an error message.
2376  *
2377  * Returns:
2378  * zero on success
2379  * -EINVAL if default used
2380  **/
2381 #define lpfc_param_init(attr, default, minval, maxval)	\
2382 static int \
2383 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2384 { \
2385 	if (lpfc_rangecheck(val, minval, maxval)) {\
2386 		phba->cfg_##attr = val;\
2387 		return 0;\
2388 	}\
2389 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2390 			"0449 lpfc_"#attr" attribute cannot be set to %d, "\
2391 			"allowed range is ["#minval", "#maxval"]\n", val); \
2392 	phba->cfg_##attr = default;\
2393 	return -EINVAL;\
2394 }
2395 
2396 /**
2397  * lpfc_param_set - Set a cfg attribute value
2398  *
2399  * Description:
2400  * Macro that given an attr e.g. hba_queue_depth expands
2401  * into a function with the name lpfc_hba_queue_depth_set
2402  *
2403  * lpfc_##attr##_set: Sets an attribute value.
2404  * @phba: pointer to the adapter structure.
2405  * @val: integer attribute value.
2406  *
2407  * Description:
2408  * Validates the value against the allowed range and sets the
2409  * adapter config field if it is in range.  Prints an error message
2410  * and does not set the parameter if the value is invalid.
2411  *
2412  * Returns:
2413  * zero on success
2414  * -EINVAL if val is invalid
2415  **/
2416 #define lpfc_param_set(attr, default, minval, maxval)	\
2417 static int \
2418 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2419 { \
2420 	if (lpfc_rangecheck(val, minval, maxval)) {\
2421 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2422 			"3052 lpfc_" #attr " changed from %d to %d\n", \
2423 			phba->cfg_##attr, val); \
2424 		phba->cfg_##attr = val;\
2425 		return 0;\
2426 	}\
2427 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2428 			"0450 lpfc_"#attr" attribute cannot be set to %d, "\
2429 			"allowed range is ["#minval", "#maxval"]\n", val); \
2430 	return -EINVAL;\
2431 }
2432 
2433 /**
2434  * lpfc_param_store - Set a vport attribute value
2435  *
2436  * Description:
2437  * Macro that given an attr e.g. hba_queue_depth expands
2438  * into a function with the name lpfc_hba_queue_depth_store.
2439  *
2440  * lpfc_##attr##_store: Set an attribute value.
2441  * @dev: class device that is converted into a Scsi_host.
2442  * @attr: device attribute, not used.
2443  * @buf: contains the attribute value in ascii.
2444  * @count: not used.
2445  *
2446  * Description:
2447  * Convert the ascii text number to an integer, then
2448  * use the lpfc_##attr##_set function to set the value.
2449  *
2450  * Returns:
2451  * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2452  * length of buffer upon success.
2453  **/
2454 #define lpfc_param_store(attr)	\
2455 static ssize_t \
2456 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2457 		    const char *buf, size_t count) \
2458 { \
2459 	struct Scsi_Host  *shost = class_to_shost(dev);\
2460 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2461 	struct lpfc_hba   *phba = vport->phba;\
2462 	uint val = 0;\
2463 	if (!isdigit(buf[0]))\
2464 		return -EINVAL;\
2465 	if (sscanf(buf, "%i", &val) != 1)\
2466 		return -EINVAL;\
2467 	if (lpfc_##attr##_set(phba, val) == 0) \
2468 		return strlen(buf);\
2469 	else \
2470 		return -EINVAL;\
2471 }
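
/*
 * Illustration only, for a hypothetical attribute not generated by this
 * file: the lpfc_param_* macros are normally used together to create the
 * full set of helpers for one phba->cfg_xxx field.  For a field cfg_foo
 * with default 8 and allowed range [1, 64], the expansions would be
 * requested as:
 *
 *	lpfc_param_show(foo)
 *	lpfc_param_init(foo, 8, 1, 64)
 *	lpfc_param_set(foo, 8, 1, 64)
 *	lpfc_param_store(foo)
 *	static DEVICE_ATTR(lpfc_foo, S_IRUGO | S_IWUSR,
 *			   lpfc_foo_show, lpfc_foo_store);
 *
 * which generates lpfc_foo_show(), lpfc_foo_init(), lpfc_foo_set() and
 * lpfc_foo_store(), all operating on phba->cfg_foo.
 */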
2472 
2473 /**
2474  * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2475  *
2476  * Description:
2477  * Macro that given an attr e.g. hba_queue_depth expands
2478  * into a function with the name lpfc_hba_queue_depth_show
2479  *
2480  * lpfc_##attr##_show: prints the attribute value in decimal.
2481  * @dev: class device that is converted into a Scsi_host.
2482  * @attr: device attribute, not used.
2483  * @buf: on return contains the attribute value in decimal.
2484  *
2485  * Returns: length of formatted string.
2486  **/
2487 #define lpfc_vport_param_show(attr)	\
2488 static ssize_t \
2489 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2490 		   char *buf) \
2491 { \
2492 	struct Scsi_Host  *shost = class_to_shost(dev);\
2493 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2494 	return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2495 }
2496 
2497 /**
2498  * lpfc_vport_param_hex_show - Return hex formatted attribute value
2499  *
2500  * Description:
2501  * Macro that given an attr e.g.
2502  * hba_queue_depth expands into a function with the name
2503  * lpfc_hba_queue_depth_show
2504  *
2505  * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2506  * @dev: class device that is converted into a Scsi_host.
2507  * @attr: device attribute, not used.
2508  * @buf: on return contains the attribute value in hexadecimal.
2509  *
2510  * Returns: length of formatted string.
2511  **/
2512 #define lpfc_vport_param_hex_show(attr)	\
2513 static ssize_t \
2514 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2515 		   char *buf) \
2516 { \
2517 	struct Scsi_Host  *shost = class_to_shost(dev);\
2518 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2519 	return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2520 }
2521 
2522 /**
2523  * lpfc_vport_param_init - Initialize a vport cfg attribute
2524  *
2525  * Description:
2526  * Macro that given an attr e.g. hba_queue_depth expands
2527  * into a function with the name lpfc_hba_queue_depth_init. The macro also
2528  * takes a default argument, a minimum and maximum argument.
2529  *
2530  * lpfc_##attr##_init: validates the min and max values then sets the
2531  * adapter config field accordingly, or uses the default if out of range
2532  * and prints an error message.
2533  * @phba: pointer to the adapter structure.
2534  * @val: integer attribute value.
2535  *
2536  * Returns:
2537  * zero on success
2538  * -EINVAL if default used
2539  **/
2540 #define lpfc_vport_param_init(attr, default, minval, maxval)	\
2541 static int \
2542 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2543 { \
2544 	if (lpfc_rangecheck(val, minval, maxval)) {\
2545 		vport->cfg_##attr = val;\
2546 		return 0;\
2547 	}\
2548 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2549 			 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2550 			 "allowed range is ["#minval", "#maxval"]\n", val); \
2551 	vport->cfg_##attr = default;\
2552 	return -EINVAL;\
2553 }
2554 
2555 /**
2556  * lpfc_vport_param_set - Set a vport cfg attribute
2557  *
2558  * Description:
2559  * Macro that given an attr e.g. hba_queue_depth expands
2560  * into a function with the name lpfc_hba_queue_depth_set
2561  *
2562  * lpfc_##attr##_set: validates the value against the allowed range and
2563  * sets the adapter config field if it is in range.  Prints an error
2564  * message and does not set the parameter if the value is invalid.
2565  * @phba: pointer to the adapter structure.
2566  * @val:	integer attribute value.
2567  *
2568  * Returns:
2569  * zero on success
2570  * -EINVAL if val is invalid
2571  **/
2572 #define lpfc_vport_param_set(attr, default, minval, maxval)	\
2573 static int \
2574 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2575 { \
2576 	if (lpfc_rangecheck(val, minval, maxval)) {\
2577 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2578 			"3053 lpfc_" #attr \
2579 			" changed from %d (x%x) to %d (x%x)\n", \
2580 			vport->cfg_##attr, vport->cfg_##attr, \
2581 			val, val); \
2582 		vport->cfg_##attr = val;\
2583 		return 0;\
2584 	}\
2585 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2586 			 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2587 			 "allowed range is ["#minval", "#maxval"]\n", val); \
2588 	return -EINVAL;\
2589 }
2590 
2591 /**
2592  * lpfc_vport_param_store - Set a vport attribute
2593  *
2594  * Description:
2595  * Macro that given an attr e.g. hba_queue_depth
2596  * expands into a function with the name lpfc_hba_queue_depth_store
2597  *
2598  * lpfc_##attr##_store: convert the ascii text number to an integer, then
2599  * use the lpfc_##attr##_set function to set the value.
2600  * @cdev: class device that is converted into a Scsi_host.
2601  * @buf:	contains the attribute value in decimal.
2602  * @count: not used.
2603  *
2604  * Returns:
2605  * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2606  * length of buffer upon success.
2607  **/
2608 #define lpfc_vport_param_store(attr)	\
2609 static ssize_t \
2610 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2611 		    const char *buf, size_t count) \
2612 { \
2613 	struct Scsi_Host  *shost = class_to_shost(dev);\
2614 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2615 	uint val = 0;\
2616 	if (!isdigit(buf[0]))\
2617 		return -EINVAL;\
2618 	if (sscanf(buf, "%i", &val) != 1)\
2619 		return -EINVAL;\
2620 	if (lpfc_##attr##_set(vport, val) == 0) \
2621 		return strlen(buf);\
2622 	else \
2623 		return -EINVAL;\
2624 }
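
/*
 * Illustration only, hypothetical attribute: the lpfc_vport_param_* macros
 * mirror the lpfc_param_* family above but generate helpers that operate on
 * a vport->cfg_xxx field, e.g. lpfc_vport_param_init(bar, 0, 0, 1) would
 * create lpfc_bar_init(struct lpfc_vport *vport, uint val) guarding
 * vport->cfg_bar.
 */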
2625 
2626 
2627 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2628 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2629 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2630 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2631 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2632 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2633 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2634 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2635 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2636 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2637 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2638 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2639 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2640 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2641 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2642 		lpfc_link_state_store);
2643 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2644 		   lpfc_option_rom_version_show, NULL);
2645 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2646 		   lpfc_num_discovered_ports_show, NULL);
2647 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2648 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2649 static DEVICE_ATTR_RO(lpfc_drvr_version);
2650 static DEVICE_ATTR_RO(lpfc_enable_fip);
2651 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2652 		   lpfc_board_mode_show, lpfc_board_mode_store);
2653 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2654 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2655 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2656 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2657 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2658 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2659 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2660 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2661 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2662 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2663 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2664 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2665 		   NULL);
2666 
2667 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2668 #define WWN_SZ 8
2669 /**
2670  * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2671  * @buf: WWN string.
2672  * @cnt: Length of string.
2673  * @wwn: Array to receive converted wwn value.
2674  *
2675  * Returns:
2676  * -EINVAL if the buffer does not contain a valid wwn
2677  * 0 success
2678  **/
2679 static ssize_t
2680 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2681 {
2682 	unsigned int i, j;
2683 
2684 	/* Count may include a LF at end of string */
2685 	if (buf[cnt-1] == '\n')
2686 		cnt--;
2687 
2688 	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2689 	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2690 		return -EINVAL;
2691 
2692 	memset(wwn, 0, WWN_SZ);
2693 
2694 	/* Validate and store the new name */
2695 	for (i = 0, j = 0; i < 16; i++) {
2696 		if ((*buf >= 'a') && (*buf <= 'f'))
2697 			j = ((j << 4) | ((*buf++ - 'a') + 10));
2698 		else if ((*buf >= 'A') && (*buf <= 'F'))
2699 			j = ((j << 4) | ((*buf++ - 'A') + 10));
2700 		else if ((*buf >= '0') && (*buf <= '9'))
2701 			j = ((j << 4) | (*buf++ - '0'));
2702 		else
2703 			return -EINVAL;
2704 		if (i % 2) {
2705 			wwn[i/2] = j & 0xff;
2706 			j = 0;
2707 		}
2708 	}
2709 	return 0;
2710 }
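
/*
 * Note, derived from the parsing above: lpfc_wwn_set() expects exactly 16
 * hex digits, optionally prefixed with "x" or "0x" and optionally followed
 * by a newline.  For example, each of the following converts to the same
 * 8-byte WWN:
 *
 *	"10000000c9abcdef"
 *	"x10000000c9abcdef"
 *	"0x10000000C9ABCDEF"
 */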
2711 /**
2712  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2713  * @dev: class device that is converted into a Scsi_host.
2714  * @attr: device attribute, not used.
2715  * @buf: containing the string lpfc_soft_wwn_key.
2716  * @count: must be size of lpfc_soft_wwn_key.
2717  *
2718  * Returns:
2719  * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2720  * length of buf indicates success
2721  **/
2722 static ssize_t
2723 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2724 			   const char *buf, size_t count)
2725 {
2726 	struct Scsi_Host  *shost = class_to_shost(dev);
2727 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2728 	struct lpfc_hba   *phba = vport->phba;
2729 	unsigned int cnt = count;
2730 	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2731 	u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2732 
2733 	/*
2734 	 * We're doing a simple sanity check for soft_wwpn setting.
2735 	 * We require that the user write a specific key to enable
2736 	 * the soft_wwpn attribute to be settable. Once the attribute
2737 	 * is written, the enable key resets. If further updates are
2738 	 * desired, the key must be written again to re-enable the
2739 	 * attribute.
2740 	 *
2741 	 * The "key" is not secret - it is a hardcoded string shown
2742 	 * here. The intent is to protect against the random user or
2743 	 * application that is just writing attributes.
2744 	 */
2745 	if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2746 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2747 				 "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
2748 				 " be enabled: fawwpn is enabled\n");
2749 		return -EINVAL;
2750 	}
2751 
2752 	/* count may include a LF at end of string */
2753 	if (buf[cnt-1] == '\n')
2754 		cnt--;
2755 
2756 	if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2757 	    (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2758 		return -EINVAL;
2759 
2760 	phba->soft_wwn_enable = 1;
2761 
2762 	dev_printk(KERN_WARNING, &phba->pcidev->dev,
2763 		   "lpfc%d: soft_wwpn assignment has been enabled.\n",
2764 		   phba->brd_no);
2765 	dev_printk(KERN_WARNING, &phba->pcidev->dev,
2766 		   "  The soft_wwpn feature is not supported by Broadcom.");
2767 
2768 	return count;
2769 }
2770 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
2771 
2772 /**
2773  * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2774  * @dev: class device that is converted into a Scsi_host.
2775  * @attr: device attribute, not used.
2776  * @buf: on return contains the wwpn in hexadecimal.
2777  *
2778  * Returns: size of formatted string.
2779  **/
2780 static ssize_t
2781 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2782 		    char *buf)
2783 {
2784 	struct Scsi_Host  *shost = class_to_shost(dev);
2785 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2786 	struct lpfc_hba   *phba = vport->phba;
2787 
2788 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2789 			(unsigned long long)phba->cfg_soft_wwpn);
2790 }
2791 
2792 /**
2793  * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2794  * @dev: class device that is converted into a Scsi_host.
2795  * @attr: device attribute, not used.
2796  * @buf: contains the wwpn in hexadecimal.
2797  * @count: number of wwpn bytes in buf
2798  *
2799  * Returns:
2800  * -EACCES hba reset not enabled, adapter over temp
2801  * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
2802  * -EIO error taking adapter offline or online
2803  * value of count on success
2804  **/
2805 static ssize_t
2806 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2807 		     const char *buf, size_t count)
2808 {
2809 	struct Scsi_Host  *shost = class_to_shost(dev);
2810 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2811 	struct lpfc_hba   *phba = vport->phba;
2812 	struct completion online_compl;
2813 	int stat1 = 0, stat2 = 0;
2814 	unsigned int cnt = count;
2815 	u8 wwpn[WWN_SZ];
2816 	int rc;
2817 
2818 	if (!phba->cfg_enable_hba_reset)
2819 		return -EACCES;
2820 	spin_lock_irq(&phba->hbalock);
2821 	if (phba->over_temp_state == HBA_OVER_TEMP) {
2822 		spin_unlock_irq(&phba->hbalock);
2823 		return -EACCES;
2824 	}
2825 	spin_unlock_irq(&phba->hbalock);
2826 	/* count may include a LF at end of string */
2827 	if (buf[cnt-1] == '\n')
2828 		cnt--;
2829 
2830 	if (!phba->soft_wwn_enable)
2831 		return -EINVAL;
2832 
2833 	/* lock setting wwpn, wwnn down */
2834 	phba->soft_wwn_enable = 0;
2835 
2836 	rc = lpfc_wwn_set(buf, cnt, wwpn);
2837 	if (rc) {
2838 		/* not able to set wwpn, unlock it */
2839 		phba->soft_wwn_enable = 1;
2840 		return rc;
2841 	}
2842 
2843 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2844 	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2845 	if (phba->cfg_soft_wwnn)
2846 		fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2847 
2848 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2849 		   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2850 
2851 	stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2852 	if (stat1)
2853 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2854 				"0463 lpfc_soft_wwpn attribute set failed to "
2855 				"reinit adapter - %d\n", stat1);
2856 	init_completion(&online_compl);
2857 	rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2858 				   LPFC_EVT_ONLINE);
2859 	if (rc == 0)
2860 		return -ENOMEM;
2861 
2862 	wait_for_completion(&online_compl);
2863 	if (stat2)
2864 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2865 				"0464 lpfc_soft_wwpn attribute set failed to "
2866 				"reinit adapter - %d\n", stat2);
2867 	return (stat1 || stat2) ? -EIO : count;
2868 }
2869 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
2870 
2871 /**
2872  * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
2873  * @dev: class device that is converted into a Scsi_host.
2874  * @attr: device attribute, not used.
2875  * @buf: on return contains the wwnn in hexadecimal.
2876  *
2877  * Returns: size of formatted string.
2878  **/
2879 static ssize_t
2880 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2881 		    char *buf)
2882 {
2883 	struct Scsi_Host *shost = class_to_shost(dev);
2884 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2885 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2886 			(unsigned long long)phba->cfg_soft_wwnn);
2887 }
2888 
2889 /**
2890  * lpfc_soft_wwnn_store - sets the ww node name of the adapter
2891  * @cdev: class device that is converted into a Scsi_host.
2892  * @buf: contains the ww node name in hexadecimal.
2893  * @count: number of wwnn bytes in buf.
2894  *
2895  * Returns:
2896  * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
2897  * value of count on success
2898  **/
2899 static ssize_t
2900 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2901 		     const char *buf, size_t count)
2902 {
2903 	struct Scsi_Host *shost = class_to_shost(dev);
2904 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2905 	unsigned int cnt = count;
2906 	u8 wwnn[WWN_SZ];
2907 	int rc;
2908 
2909 	/* count may include a LF at end of string */
2910 	if (buf[cnt-1] == '\n')
2911 		cnt--;
2912 
2913 	if (!phba->soft_wwn_enable)
2914 		return -EINVAL;
2915 
2916 	rc = lpfc_wwn_set(buf, cnt, wwnn);
2917 	if (rc) {
2918 		/* Allow wwnn to be set many times, as long as the enable
2919 		 * is set. However, once the wwpn is set, everything locks.
2920 		 */
2921 		return rc;
2922 	}
2923 
2924 	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2925 
2926 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2927 		   "lpfc%d: soft_wwnn set. Value will take effect upon "
2928 		   "setting of the soft_wwpn\n", phba->brd_no);
2929 
2930 	return count;
2931 }
2932 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
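
/*
 * Illustration only (hypothetical host number): setting a soft WWN is a
 * three-step sequence enforced by the attributes above.  The enable key is
 * written first, the node name (optional) second, and writing the port name
 * last takes the port offline and back online with the new names:
 *
 *	echo C99G71SL8032A      > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *	echo 0x20000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwnn
 *	echo 0x10000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwpn
 */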
2933 
2934 /**
2935  * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
2936  *		      Optimized Access Storage (OAS) operations.
2937  * @dev: class device that is converted into a Scsi_host.
2938  * @attr: device attribute, not used.
2939  * @buf: buffer for passing information.
2940  *
2941  * Returns:
2942  * size of formatted string.
2943  **/
2944 static ssize_t
2945 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2946 		  char *buf)
2947 {
2948 	struct Scsi_Host *shost = class_to_shost(dev);
2949 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2950 
2951 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2952 			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2953 }
2954 
2955 /**
2956  * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
2957  *		      Optimized Access Storage (OAS) operations.
2958  * @dev: class device that is converted into a Scsi_host.
2959  * @attr: device attribute, not used.
2960  * @buf: buffer for passing information.
2961  * @count: Size of the data buffer.
2962  *
2963  * Returns:
2964  * -EINVAL count is invalid or a wwpn byte is invalid
2965  * -EPERM oas is not supported by hba
2966  * value of count on success
2967  **/
2968 static ssize_t
2969 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2970 		   const char *buf, size_t count)
2971 {
2972 	struct Scsi_Host *shost = class_to_shost(dev);
2973 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2974 	unsigned int cnt = count;
2975 	uint8_t wwpn[WWN_SZ];
2976 	int rc;
2977 
2978 	if (!phba->cfg_fof)
2979 		return -EPERM;
2980 
2981 	/* count may include a LF at end of string */
2982 	if (buf[cnt-1] == '\n')
2983 		cnt--;
2984 
2985 	rc = lpfc_wwn_set(buf, cnt, wwpn);
2986 	if (rc)
2987 		return rc;
2988 
2989 	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2990 	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2991 	if (wwn_to_u64(wwpn) == 0)
2992 		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2993 	else
2994 		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2995 	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2996 	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2997 	return count;
2998 }
2999 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
3000 		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
3001 
3002 /**
3003  * lpfc_oas_priority_show - Return the priority used for
3004  *		      Optimized Access Storage (OAS) operations.
3005  * @dev: class device that is converted into a Scsi_host.
3006  * @attr: device attribute, not used.
3007  * @buf: buffer for passing information.
3008  *
3009  * Returns:
3010  * size of formatted string.
3011  **/
3012 static ssize_t
3013 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
3014 		       char *buf)
3015 {
3016 	struct Scsi_Host *shost = class_to_shost(dev);
3017 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3018 
3019 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3020 }
3021 
3022 /**
3023  * lpfc_oas_priority_store - Store the priority to be used for
3024  *		      Optimized Access Storage (OAS) operations.
3025  * @dev: class device that is converted into a Scsi_host.
3026  * @attr: device attribute, not used.
3027  * @buf: buffer for passing information.
3028  * @count: Size of the data buffer.
3029  *
3030  * Returns:
3031  * -EINVAL if the value cannot be parsed or is greater than 0x7f
3032  * -EPERM oas is not supported by hba
3033  * value of count on success
3034  **/
3035 static ssize_t
3036 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3037 			const char *buf, size_t count)
3038 {
3039 	struct Scsi_Host *shost = class_to_shost(dev);
3040 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3041 	unsigned int cnt = count;
3042 	unsigned long val;
3043 	int ret;
3044 
3045 	if (!phba->cfg_fof)
3046 		return -EPERM;
3047 
3048 	/* count may include a LF at end of string */
3049 	if (buf[cnt-1] == '\n')
3050 		cnt--;
3051 
3052 	ret = kstrtoul(buf, 0, &val);
3053 	if (ret || (val > 0x7f))
3054 		return -EINVAL;
3055 
3056 	if (val)
3057 		phba->cfg_oas_priority = (uint8_t)val;
3058 	else
3059 		phba->cfg_oas_priority = phba->cfg_XLanePriority;
3060 	return count;
3061 }
3062 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3063 		   lpfc_oas_priority_show, lpfc_oas_priority_store);
3064 
3065 /**
3066  * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3067  *		      for Optimized Access Storage (OAS) operations.
3068  * @dev: class device that is converted into a Scsi_host.
3069  * @attr: device attribute, not used.
3070  * @buf: buffer for passing information.
3071  *
3072  * Returns:
3073  * size of formatted string.
3074  **/
3075 static ssize_t
3076 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3077 		  char *buf)
3078 {
3079 	struct Scsi_Host *shost = class_to_shost(dev);
3080 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3081 
3082 	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3083 			wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3084 }
3085 
3086 /**
3087  * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3088  *		      for Optimized Access Storage (OAS) operations.
3089  * @dev: class device that is converted into a Scsi_host.
3090  * @attr: device attribute, not used.
3091  * @buf: buffer for passing information.
3092  * @count: Size of the data buffer.
3093  *
3094  * Returns:
3095  * -EINVAL count is invalid or a wwpn byte is invalid
3096  * -EPERM oas is not supported by hba
3097  * value of count on success
3098  **/
3099 static ssize_t
3100 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3101 		   const char *buf, size_t count)
3102 {
3103 	struct Scsi_Host *shost = class_to_shost(dev);
3104 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3105 	unsigned int cnt = count;
3106 	uint8_t wwpn[WWN_SZ];
3107 	int rc;
3108 
3109 	if (!phba->cfg_fof)
3110 		return -EPERM;
3111 
3112 	/* count may include a LF at end of string */
3113 	if (buf[cnt-1] == '\n')
3114 		cnt--;
3115 
3116 	rc = lpfc_wwn_set(buf, cnt, wwpn);
3117 	if (rc)
3118 		return rc;
3119 
3120 	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3121 	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3122 	if (wwn_to_u64(wwpn) == 0)
3123 		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3124 	else
3125 		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3126 	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3127 	if (phba->cfg_oas_priority == 0)
3128 		phba->cfg_oas_priority = phba->cfg_XLanePriority;
3129 	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3130 	return count;
3131 }
3132 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3133 		   lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3134 
3135 /**
3136  * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3137  *			    of whether luns will be enabled or disabled
3138  *			    for Optimized Access Storage (OAS) operations.
3139  * @dev: class device that is converted into a Scsi_host.
3140  * @attr: device attribute, not used.
3141  * @buf: buffer for passing information.
3142  *
3143  * Returns:
3144  * size of formatted string.
3145  **/
3146 static ssize_t
3147 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3148 			char *buf)
3149 {
3150 	struct Scsi_Host *shost = class_to_shost(dev);
3151 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3152 
3153 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3154 }
3155 
3156 /**
3157  * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3158  *			    that will be applied to luns for
3159  *			    Optimized Access Storage (OAS) operations.
3160  * @dev: class device that is converted into a Scsi_host.
3161  * @attr: device attribute, not used.
3162  * @buf: buffer for passing information.
3163  * @count: Size of the data buffer.
3164  *
3165  * Returns:
3166  * -EINVAL count is invalid or the value is not 0 or 1
3167  * -EPERM oas is not supported by hba
3168  * value of count on success
3169  **/
3170 static ssize_t
3171 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3172 			 const char *buf, size_t count)
3173 {
3174 	struct Scsi_Host *shost = class_to_shost(dev);
3175 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3176 	int val = 0;
3177 
3178 	if (!phba->cfg_fof)
3179 		return -EPERM;
3180 
3181 	if (!isdigit(buf[0]))
3182 		return -EINVAL;
3183 
3184 	if (sscanf(buf, "%i", &val) != 1)
3185 		return -EINVAL;
3186 
3187 	if ((val != 0) && (val != 1))
3188 		return -EINVAL;
3189 
3190 	phba->cfg_oas_lun_state = val;
3191 	return strlen(buf);
3192 }
3193 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3194 		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
3195 
3196 /**
3197  * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3198  *                          Storage (OAS) lun returned by the
3199  *                          lpfc_oas_lun_show function.
3200  * @dev: class device that is converted into a Scsi_host.
3201  * @attr: device attribute, not used.
3202  * @buf: buffer for passing information.
3203  *
3204  * Returns:
3205  * size of formatted string.
3206  **/
3207 static ssize_t
3208 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3209 			 char *buf)
3210 {
3211 	struct Scsi_Host *shost = class_to_shost(dev);
3212 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3213 
3214 	if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3215 		return -EFAULT;
3216 
3217 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3218 }
3219 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3220 		   lpfc_oas_lun_status_show, NULL);
3221 
3222 
3223 /**
3224  * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3225  *			   (OAS) operations.
3226  * @phba: lpfc_hba pointer.
3227  * @vpt_wwpn: vport wwpn by reference.
 * @tgt_wwpn: target wwpn by reference.
3228  * @lun: the fc lun for setting oas state.
3229  * @oas_state: the oas state to be set to the lun.
 * @pri: priority of the lun.
3230  *
3231  * Returns:
3232  * SUCCESS : 0
3233  * -EPERM OAS is not enabled or not supported by this port.
3234  *
3235  */
3236 static size_t
3237 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3238 		       uint8_t tgt_wwpn[], uint64_t lun,
3239 		       uint32_t oas_state, uint8_t pri)
3240 {
3241 
3242 	int rc = 0;
3243 
3244 	if (!phba->cfg_fof)
3245 		return -EPERM;
3246 
3247 	if (oas_state) {
3248 		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3249 					 (struct lpfc_name *)tgt_wwpn,
3250 					 lun, pri))
3251 			rc = -ENOMEM;
3252 	} else {
3253 		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3254 				     (struct lpfc_name *)tgt_wwpn, lun, pri);
3255 	}
3256 	return rc;
3257 
3258 }
3259 
3260 /**
3261  * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3262  *			  Access Storage (OAS) operations.
3263  * @phba: lpfc_hba pointer.
3264  * @vpt_wwpn: wwpn of the vport associated with the returned lun
3265  * @tgt_wwpn: wwpn of the target associated with the returned lun
3266  * @lun_status: status of the returned lun
 * @lun_pri: priority of the returned lun
3267  *
3268  * Returns the first or next lun enabled for OAS operations for the vport/target
3269  * specified.  If a lun is found, its vport wwpn, target wwpn and status is
3270  * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3271  *
3272  * Return:
3273  * lun that is OAS enabled for the vport/target
3274  * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3275  */
3276 static uint64_t
3277 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3278 		      uint8_t tgt_wwpn[], uint32_t *lun_status,
3279 		      uint32_t *lun_pri)
3280 {
3281 	uint64_t found_lun;
3282 
3283 	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3284 		return NOT_OAS_ENABLED_LUN;
3285 	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3286 				   phba->sli4_hba.oas_next_vpt_wwpn,
3287 				   (struct lpfc_name *)
3288 				   phba->sli4_hba.oas_next_tgt_wwpn,
3289 				   &phba->sli4_hba.oas_next_lun,
3290 				   (struct lpfc_name *)vpt_wwpn,
3291 				   (struct lpfc_name *)tgt_wwpn,
3292 				   &found_lun, lun_status, lun_pri))
3293 		return found_lun;
3294 	else
3295 		return NOT_OAS_ENABLED_LUN;
3296 }
3297 
3298 /**
3299  * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3300  * @phba: lpfc_hba pointer.
3301  * @vpt_wwpn: vport wwpn by reference.
3302  * @tgt_wwpn: target wwpn by reference.
3303  * @lun: the fc lun for setting oas state.
3304  * @oas_state: the oas state to be set to the oas_lun.
 * @pri: priority of the lun.
3305  *
3306  * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3307  * a lun for OAS operations.
3308  *
3309  * Return:
3310  * SUCCESS: 0
3311  * -ENOMEM: failed to enable a lun for OAS operations
3312  * -EPERM: OAS is not enabled
3313  */
3314 static ssize_t
3315 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3316 			  uint8_t tgt_wwpn[], uint64_t lun,
3317 			  uint32_t oas_state, uint8_t pri)
3318 {
3319 
3320 	int rc;
3321 
3322 	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3323 				    oas_state, pri);
3324 	return rc;
3325 }
3326 
3327 /**
3328  * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3329  * @dev: class device that is converted into a Scsi_host.
3330  * @attr: device attribute, not used.
3331  * @buf: buffer for passing information.
3332  *
3333  * This routine returns a lun enabled for OAS each time the function
3334  * is called.
3335  *
3336  * Returns:
3337  * SUCCESS: size of formatted string.
3338  * -EFAULT: target or vport wwpn was not set properly.
3339  * -EPERM: oas is not enabled.
3340  **/
3341 static ssize_t
3342 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3343 		  char *buf)
3344 {
3345 	struct Scsi_Host *shost = class_to_shost(dev);
3346 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3347 
3348 	uint64_t oas_lun;
3349 	int len = 0;
3350 
3351 	if (!phba->cfg_fof)
3352 		return -EPERM;
3353 
3354 	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3355 		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3356 			return -EFAULT;
3357 
3358 	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3359 		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3360 			return -EFAULT;
3361 
3362 	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3363 					phba->cfg_oas_tgt_wwpn,
3364 					&phba->cfg_oas_lun_status,
3365 					&phba->cfg_oas_priority);
3366 	if (oas_lun != NOT_OAS_ENABLED_LUN)
3367 		phba->cfg_oas_flags |= OAS_LUN_VALID;
3368 
3369 	len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3370 
3371 	return len;
3372 }
3373 
3374 /**
3375  * lpfc_oas_lun_store - Sets the OAS state for lun
3376  * @dev: class device that is converted into a Scsi_host.
3377  * @attr: device attribute, not used.
3378  * @buf: buffer for passing information.
 * @count: Size of the data buffer.
3379  *
3380  * This function sets the OAS state for lun.  Before this function is called,
3381  * the vport wwpn, target wwpn, and oas state need to be set.
3382  *
3383  * Returns:
3384  * SUCCESS: value of count.
3385  * -EFAULT: target or vport wwpn was not set properly.
3386  * -EPERM: oas is not enabled.
3388  **/
3389 static ssize_t
3390 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3391 		   const char *buf, size_t count)
3392 {
3393 	struct Scsi_Host *shost = class_to_shost(dev);
3394 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3395 	uint64_t scsi_lun;
3396 	uint32_t pri;
3397 	ssize_t rc;
3398 
3399 	if (!phba->cfg_fof)
3400 		return -EPERM;
3401 
3402 	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3403 		return -EFAULT;
3404 
3405 	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3406 		return -EFAULT;
3407 
3408 	if (!isdigit(buf[0]))
3409 		return -EINVAL;
3410 
3411 	if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3412 		return -EINVAL;
3413 
3414 	pri = phba->cfg_oas_priority;
3415 	if (pri == 0)
3416 		pri = phba->cfg_XLanePriority;
3417 
3418 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3419 			"3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3420 			"priority 0x%x with oas state %d\n",
3421 			wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3422 			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3423 			pri, phba->cfg_oas_lun_state);
3424 
3425 	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3426 				       phba->cfg_oas_tgt_wwpn, scsi_lun,
3427 				       phba->cfg_oas_lun_state, pri);
3428 	if (rc)
3429 		return rc;
3430 
3431 	return count;
3432 }
3433 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3434 		   lpfc_oas_lun_show, lpfc_oas_lun_store);
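/*
 * Illustrative OAS configuration sketch (the host number and WWPN/LUN values
 * are examples only, and the target wwpn attribute is assumed here to be the
 * lpfc_xlane_tgt entry defined elsewhere in this file):
 *
 *   echo 0x10000090fa942779 > /sys/class/scsi_host/host6/lpfc_xlane_vpt
 *   echo 0x5005076802130e1f > /sys/class/scsi_host/host6/lpfc_xlane_tgt
 *   echo 1 > /sys/class/scsi_host/host6/lpfc_xlane_lun_state
 *   echo 0x0 > /sys/class/scsi_host/host6/lpfc_xlane_lun
 *
 * Each subsequent read of lpfc_xlane_lun returns the next OAS-enabled lun
 * for the selected vport/target, as implemented by lpfc_oas_lun_show().
 */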
3435 
3436 int lpfc_enable_nvmet_cnt;
3437 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3438 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3439 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3440 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3441 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
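/*
 * Illustrative module-load sketch (the WWPN value is an example only):
 * enable NVME Target mode on the HBA port identified by its WWPN.
 *
 *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779
 */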
3442 
3443 static int lpfc_poll = 0;
3444 module_param(lpfc_poll, int, S_IRUGO);
3445 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3446 		 " 0 - none,"
3447 		 " 1 - poll with interrupts enabled,"
3448 		 " 3 - poll and disable FCP ring interrupts");
3449 
3450 static DEVICE_ATTR_RW(lpfc_poll);
3451 
3452 int lpfc_no_hba_reset_cnt;
3453 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3454 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3455 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3456 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3457 
3458 LPFC_ATTR(sli_mode, 0, 0, 3,
3459 	"SLI mode selector:"
3460 	" 0 - auto (SLI-3 if supported),"
3461 	" 2 - select SLI-2 even on SLI-3 capable HBAs,"
3462 	" 3 - select SLI-3");
3463 
3464 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3465 	"Enable NPIV functionality");
3466 
3467 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3468 	"FCF Fast failover=1 Priority failover=2");
3469 
3470 /*
3471 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3472 #	0x0 = disabled, XRI/OXID use not tracked.
3473 #	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3474 #	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3475 */
3476 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3477 	"Enable RRQ functionality");
3478 
3479 /*
3480 # lpfc_suppress_link_up:  Bring link up at initialization
3481 #            0x0  = bring link up (issue MBX_INIT_LINK)
3482 #            0x1  = do NOT bring link up at initialization (MBX_INIT_LINK)
3483 #            0x2  = never bring up link
3484 # Default value is 0.
3485 */
3486 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3487 		LPFC_DELAY_INIT_LINK_INDEFINITELY,
3488 		"Suppress Link Up at initialization");
3489 
3490 static ssize_t
3491 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3492 {
3493 	struct Scsi_Host  *shost = class_to_shost(dev);
3494 	struct lpfc_hba   *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3495 
3496 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3497 			 phba->sli4_hba.pc_sli4_params.pls);
3498 }
3499 static DEVICE_ATTR(pls, 0444,
3500 			 lpfc_pls_show, NULL);
3501 
3502 static ssize_t
3503 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3504 {
3505 	struct Scsi_Host  *shost = class_to_shost(dev);
3506 	struct lpfc_hba   *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3507 
3508 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3509 			 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3510 }
3511 static DEVICE_ATTR(pt, 0444,
3512 			 lpfc_pt_show, NULL);
3513 
3514 /*
3515 # lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3516 #       1 - (1024)
3517 #       2 - (2048)
3518 #       3 - (3072)
3519 #       4 - (4096)
3520 #       5 - (5120)
3521 */
3522 static ssize_t
3523 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3524 {
3525 	struct Scsi_Host  *shost = class_to_shost(dev);
3526 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3527 
3528 	return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3529 }
3530 
3531 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3532 			 lpfc_iocb_hw_show, NULL);
3533 static ssize_t
3534 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3535 {
3536 	struct Scsi_Host  *shost = class_to_shost(dev);
3537 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3538 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3539 
3540 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3541 			pring ? pring->txq_max : 0);
3542 }
3543 
3544 static DEVICE_ATTR(txq_hw, S_IRUGO,
3545 			 lpfc_txq_hw_show, NULL);
3546 static ssize_t
3547 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3548 			 char *buf)
3549 {
3550 	struct Scsi_Host  *shost = class_to_shost(dev);
3551 	struct lpfc_hba   *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3552 	struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3553 
3554 	return scnprintf(buf, PAGE_SIZE, "%d\n",
3555 			pring ? pring->txcmplq_max : 0);
3556 }
3557 
3558 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3559 			 lpfc_txcmplq_hw_show, NULL);
3560 
3561 /*
3562 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3563 # until the timer expires. Value range is [0,255]. Default value is 30.
3564 */
3565 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3566 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3567 module_param(lpfc_nodev_tmo, int, 0);
3568 MODULE_PARM_DESC(lpfc_nodev_tmo,
3569 		 "Seconds driver will hold I/O waiting "
3570 		 "for a device to come back");
3571 
3572 /**
3573  * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3574  * @dev: class converted to a Scsi_host structure.
3575  * @attr: device attribute, not used.
3576  * @buf: on return contains the dev loss timeout in decimal.
3577  *
3578  * Returns: size of formatted string.
3579  **/
3580 static ssize_t
3581 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3582 		    char *buf)
3583 {
3584 	struct Scsi_Host  *shost = class_to_shost(dev);
3585 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3586 
3587 	return scnprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
3588 }
3589 
3590 /**
3591  * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3592  * @vport: lpfc vport structure pointer.
3593  * @val: contains the nodev timeout value.
3594  *
3595  * Description:
3596  * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
3597  * a kernel error message is printed and zero is returned.
3598  * Else if val is in range then nodev tmo and devloss tmo are set to val.
3599  * Otherwise nodev tmo is set to the default value.
3600  *
3601  * Returns:
3602  * zero if already set or if val is in range
3603  * -EINVAL val out of range
3604  **/
3605 static int
3606 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3607 {
3608 	if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3609 		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3610 		if (val != LPFC_DEF_DEVLOSS_TMO)
3611 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3612 					 "0407 Ignoring lpfc_nodev_tmo module "
3613 					 "parameter because lpfc_devloss_tmo "
3614 					 "is set.\n");
3615 		return 0;
3616 	}
3617 
3618 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3619 		vport->cfg_nodev_tmo = val;
3620 		vport->cfg_devloss_tmo = val;
3621 		return 0;
3622 	}
3623 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3624 			 "0400 lpfc_nodev_tmo attribute cannot be set to"
3625 			 " %d, allowed range is [%d, %d]\n",
3626 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3627 	vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3628 	return -EINVAL;
3629 }
3630 
3631 /**
3632  * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3633  * @vport: lpfc vport structure pointer.
3634  *
3635  * Description:
3636  * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3637  **/
3638 static void
3639 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3640 {
3641 	struct Scsi_Host  *shost;
3642 	struct lpfc_nodelist  *ndlp;
3643 #if (IS_ENABLED(CONFIG_NVME_FC))
3644 	struct lpfc_nvme_rport *rport;
3645 	struct nvme_fc_remote_port *remoteport = NULL;
3646 #endif
3647 
3648 	shost = lpfc_shost_from_vport(vport);
3649 	spin_lock_irq(shost->host_lock);
3650 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3651 		if (!NLP_CHK_NODE_ACT(ndlp))
3652 			continue;
3653 		if (ndlp->rport)
3654 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3655 #if (IS_ENABLED(CONFIG_NVME_FC))
3656 		spin_lock(&vport->phba->hbalock);
3657 		rport = lpfc_ndlp_get_nrport(ndlp);
3658 		if (rport)
3659 			remoteport = rport->remoteport;
3660 		spin_unlock(&vport->phba->hbalock);
3661 		if (rport && remoteport)
3662 			nvme_fc_set_remoteport_devloss(remoteport,
3663 						       vport->cfg_devloss_tmo);
3664 #endif
3665 	}
3666 	spin_unlock_irq(shost->host_lock);
3667 }
3668 
3669 /**
3670  * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3671  * @vport: lpfc vport structure pointer.
3672  * @val: contains the tmo value.
3673  *
3674  * Description:
3675  * If the devloss tmo is already set or the vport dev loss tmo has changed
3676  * then a kernel error message is printed and zero is returned.
3677  * Else if val is in range then nodev tmo and devloss tmo are set to val.
3678  * Otherwise nodev tmo is set to the default value.
3679  *
3680  * Returns:
3681  * zero if already set or if val is in range
3682  * -EINVAL val out of range
3683  **/
3684 static int
3685 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3686 {
3687 	if (vport->dev_loss_tmo_changed ||
3688 	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3689 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3690 				 "0401 Ignoring change to lpfc_nodev_tmo "
3691 				 "because lpfc_devloss_tmo is set.\n");
3692 		return 0;
3693 	}
3694 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3695 		vport->cfg_nodev_tmo = val;
3696 		vport->cfg_devloss_tmo = val;
3697 		/*
3698 		 * For compat: set the fc_host dev loss so new rports
3699 		 * will get the value.
3700 		 */
3701 		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3702 		lpfc_update_rport_devloss_tmo(vport);
3703 		return 0;
3704 	}
3705 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3706 			 "0403 lpfc_nodev_tmo attribute cannot be set to "
3707 			 "%d, allowed range is [%d, %d]\n",
3708 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3709 	return -EINVAL;
3710 }
3711 
3712 lpfc_vport_param_store(nodev_tmo)
3713 
3714 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3715 
3716 /*
3717 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3718 # disappear until the timer expires. Value range is [0,255]. Default
3719 # value is 30.
3720 */
3721 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3722 MODULE_PARM_DESC(lpfc_devloss_tmo,
3723 		 "Seconds driver will hold I/O waiting "
3724 		 "for a device to come back");
3725 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3726 		      LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3727 lpfc_vport_param_show(devloss_tmo)
3728 
3729 /**
3730  * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3731  * @vport: lpfc vport structure pointer.
3732  * @val: contains the tmo value.
3733  *
3734  * Description:
3735  * If val is in a valid range then set the vport nodev tmo,
3736  * devloss tmo, also set the vport dev loss tmo changed flag.
3737  * Else a kernel error message is printed.
3738  *
3739  * Returns:
3740  * zero if val is in range
3741  * -EINVAL val out of range
3742  **/
3743 static int
3744 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3745 {
3746 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3747 		vport->cfg_nodev_tmo = val;
3748 		vport->cfg_devloss_tmo = val;
3749 		vport->dev_loss_tmo_changed = 1;
3750 		fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3751 		lpfc_update_rport_devloss_tmo(vport);
3752 		return 0;
3753 	}
3754 
3755 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3756 			 "0404 lpfc_devloss_tmo attribute cannot be set to "
3757 			 "%d, allowed range is [%d, %d]\n",
3758 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3759 	return -EINVAL;
3760 }
3761 
3762 lpfc_vport_param_store(devloss_tmo)
3763 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
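/*
 * Illustrative usage sketch (the host number is an example only): raise the
 * devloss timeout to 60 seconds at runtime.  The new value is also
 * propagated to existing remote ports by lpfc_update_rport_devloss_tmo().
 *
 *   echo 60 > /sys/class/scsi_host/host6/lpfc_devloss_tmo
 *   cat /sys/class/scsi_host/host6/lpfc_nodev_tmo
 */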
3764 
3765 /*
3766  * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3767  * lpfc_suppress_rsp = 0  Disable
3768  * lpfc_suppress_rsp = 1  Enable (default)
3769  *
3770  */
3771 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3772 	    "Enable suppress rsp feature if firmware supports it");
3773 
3774 /*
3775  * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3776  * lpfc_nvmet_mrq = 0  driver will calculate optimal number of RQ pairs
3777  * lpfc_nvmet_mrq = 1  use a single RQ pair
3778  * lpfc_nvmet_mrq >= 2  use specified RQ pairs for MRQ
3779  *
3780  */
3781 LPFC_ATTR_R(nvmet_mrq,
3782 	    LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3783 	    "Specify number of RQ pairs for processing NVMET cmds");
3784 
3785 /*
3786  * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3787  * to each NVMET RQ. Range 64 to 2048, default is 512.
3788  */
3789 LPFC_ATTR_R(nvmet_mrq_post,
3790 	    LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3791 	    LPFC_NVMET_RQE_DEF_COUNT,
3792 	    "Specify number of RQ buffers to initially post");
3793 
3794 /*
3795  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3796  * Supported Values:  1 - register just FCP
3797  *                    3 - register both FCP and NVME
3798  * Supported values are [1,3]. Default value is 3
3799  */
3800 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3801 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3802 	    "Enable FC4 Protocol support - FCP / NVME");
3803 
3804 /*
3805 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3806 # deluged with LOTS of information.
3807 # You can set a bit mask to record specific types of verbose messages:
3808 # See lpfc_logmsg.h for definitions.
3809 */
3810 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3811 		       "Verbose logging bit-mask");
3812 
3813 /*
3814 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3815 # objects that have been registered with the nameserver after login.
3816 */
3817 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3818 		  "Deregister nameserver objects before LOGO");
3819 
3820 /*
3821 # lun_queue_depth:  This parameter is used to limit the number of outstanding
3822 # commands per FCP LUN.
3823 */
3824 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3825 		  "Max number of FCP commands we can queue to a specific LUN");
3826 
3827 /*
3828 # tgt_queue_depth:  This parameter is used to limit the number of outstanding
3829 # commands per target port. Value range is [10,65535]. Default value is 65535.
3830 */
3831 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3832 module_param(lpfc_tgt_queue_depth, uint, 0444);
3833 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3834 lpfc_vport_param_show(tgt_queue_depth);
3835 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3836 		      LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3837 
3838 /**
3839  * lpfc_tgt_queue_depth_set - Sets the vport target queue depth.
3840  * @vport: lpfc vport structure pointer.
3841  * @val: integer attribute value.
3842  *
3843  * Description: Sets the parameter to the new value.
3844  *
3845  * Returns:
3846  * zero on success
3847  * -EINVAL if val is invalid
3848  */
3849 static int
3850 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3851 {
3852 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3853 	struct lpfc_nodelist *ndlp;
3854 
3855 	if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3856 		return -EINVAL;
3857 
3858 	if (val == vport->cfg_tgt_queue_depth)
3859 		return 0;
3860 
3861 	spin_lock_irq(shost->host_lock);
3862 	vport->cfg_tgt_queue_depth = val;
3863 
3864 	/* Next loop thru nodelist and change cmd_qdepth */
3865 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3866 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3867 
3868 	spin_unlock_irq(shost->host_lock);
3869 	return 0;
3870 }
3871 
3872 lpfc_vport_param_store(tgt_queue_depth);
3873 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
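/*
 * Illustrative usage sketch (the host number is an example only): lower the
 * per-target queue depth; lpfc_tgt_queue_depth_set() pushes the new value to
 * every node on the vport's fc_nodes list.
 *
 *   echo 128 > /sys/class/scsi_host/host6/lpfc_tgt_queue_depth
 */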
3874 
3875 /*
3876 # hba_queue_depth:  This parameter is used to limit the number of outstanding
3877 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
3878 # value is greater than the maximum number of exchanges supported by the HBA,
3879 # then maximum number of exchanges supported by the HBA is used to determine
3880 # the hba_queue_depth.
3881 */
3882 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3883 	    "Max number of FCP commands we can queue to a lpfc HBA");
3884 
3885 /*
3886 # peer_port_login:  This parameter allows/prevents logins
3887 # between peer ports hosted on the same physical port.
3888 # When this parameter is set 0 peer ports of same physical port
3889 # are not allowed to login to each other.
3890 # When this parameter is set 1 peer ports of same physical port
3891 # are allowed to login to each other.
3892 # Default value of this parameter is 0.
3893 */
3894 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3895 		  "Allow peer ports on the same physical port to login to each "
3896 		  "other.");
3897 
3898 /*
3899 # restrict_login:  This parameter allows/prevents logins
3900 # between Virtual Ports and remote initiators.
3901 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
3902 # other initiators and will attempt to PLOGI all remote ports.
3903 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
3904 # remote ports and will not attempt to PLOGI to other initiators.
3905 # This parameter does not restrict to the physical port.
3906 # This parameter does not restrict logins to Fabric resident remote ports.
3907 # Default value of this parameter is 1.
3908 */
3909 static int lpfc_restrict_login = 1;
3910 module_param(lpfc_restrict_login, int, S_IRUGO);
3911 MODULE_PARM_DESC(lpfc_restrict_login,
3912 		 "Restrict virtual ports login to remote initiators.");
3913 lpfc_vport_param_show(restrict_login);
3914 
3915 /**
3916  * lpfc_restrict_login_init - Set the vport restrict login flag
3917  * @vport: lpfc vport structure pointer.
3918  * @val: contains the restrict login value.
3919  *
3920  * Description:
3921  * If val is not in a valid range then log a kernel error message and set
3922  * the vport restrict login to one.
3923  * If the port type is physical clear the restrict login flag and return.
3924  * Else set the restrict login flag to val.
3925  *
3926  * Returns:
3927  * zero if val is in range
3928  * -EINVAL val out of range
3929  **/
3930 static int
3931 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3932 {
3933 	if (val < 0 || val > 1) {
3934 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3935 				 "0422 lpfc_restrict_login attribute cannot "
3936 				 "be set to %d, allowed range is [0, 1]\n",
3937 				 val);
3938 		vport->cfg_restrict_login = 1;
3939 		return -EINVAL;
3940 	}
3941 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
3942 		vport->cfg_restrict_login = 0;
3943 		return 0;
3944 	}
3945 	vport->cfg_restrict_login = val;
3946 	return 0;
3947 }
3948 
3949 /**
3950  * lpfc_restrict_login_set - Set the vport restrict login flag
3951  * @vport: lpfc vport structure pointer.
3952  * @val: contains the restrict login value.
3953  *
3954  * Description:
3955  * If val is not in a valid range then log a kernel error message and set
3956  * the vport restrict login to one.
3957  * If the port type is physical and the val is not zero log a kernel
3958  * error message, clear the restrict login flag and return zero.
3959  * Else set the restrict login flag to val.
3960  *
3961  * Returns:
3962  * zero if val is in range
3963  * -EINVAL val out of range
3964  **/
3965 static int
3966 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3967 {
3968 	if (val < 0 || val > 1) {
3969 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3970 				 "0425 lpfc_restrict_login attribute cannot "
3971 				 "be set to %d, allowed range is [0, 1]\n",
3972 				 val);
3973 		vport->cfg_restrict_login = 1;
3974 		return -EINVAL;
3975 	}
3976 	if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3977 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3978 				 "0468 lpfc_restrict_login must be 0 for "
3979 				 "Physical ports.\n");
3980 		vport->cfg_restrict_login = 0;
3981 		return 0;
3982 	}
3983 	vport->cfg_restrict_login = val;
3984 	return 0;
3985 }
3986 lpfc_vport_param_store(restrict_login);
3987 static DEVICE_ATTR_RW(lpfc_restrict_login);
3988 
3989 /*
3990 # Some disk devices have a "select ID" or "select Target" capability.
3991 # From a protocol standpoint "select ID" usually means select the
3992 # Fibre channel "ALPA".  In the FC-AL Profile there is an "informative
3993 # annex" which contains a table that maps a "select ID" (a number
3994 # between 0 and 7F) to an ALPA.  By default, for compatibility with
3995 # older drivers, the lpfc driver scans this table from low ALPA to high
3996 # ALPA.
3997 #
3998 # Turning on the scan-down variable (on  = 1, off = 0) will
3999 # cause the lpfc driver to use an inverted table, effectively
4000 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
4001 #
4002 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
4003 # and will not work across a fabric. Also this parameter will take
4004 # effect only in the case when ALPA map is not available.)
4005 */
4006 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
4007 		  "Start scanning for devices from highest ALPA to lowest");
4008 
4009 /*
4010 # lpfc_topology:  link topology for init link
4011 #            0x0  = attempt loop mode then point-to-point
4012 #            0x01 = internal loopback mode
4013 #            0x02 = attempt point-to-point mode only
4014 #            0x04 = attempt loop mode only
4015 #            0x06 = attempt point-to-point mode then loop
4016 # Set point-to-point mode if you want to run as an N_Port.
4017 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4018 # Default value is 0.
4019 */
4020 LPFC_ATTR(topology, 0, 0, 6,
4021 	"Select Fibre Channel topology");
4022 
4023 /**
4024  * lpfc_topology_store - Set the adapter's topology field
4025  * @dev: class device that is converted into a Scsi_host.
4026  * @attr: device attribute, not used.
 * @buf: buffer containing the new topology value.
 * @count: size of the data buffer.
4027  *
4028  * Description:
4029  * If val is in a valid range then set the adapter's topology field and
4030  * issue a lip; if the lip fails reset the topology to the old value.
4031  *
4032  * If the value is not in range log a kernel error message and return an error.
4033  *
4034  * Returns:
4035  * zero if val is in range and lip okay
4036  * non-zero return value from lpfc_issue_lip()
4037  * -EINVAL val out of range
4038  **/
4039 static ssize_t
4040 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4041 			const char *buf, size_t count)
4042 {
4043 	struct Scsi_Host  *shost = class_to_shost(dev);
4044 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4045 	struct lpfc_hba   *phba = vport->phba;
4046 	int val = 0;
4047 	int nolip = 0;
4048 	const char *val_buf = buf;
4049 	int err;
4050 	uint32_t prev_val;
4051 
4052 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4053 		nolip = 1;
4054 		val_buf = &buf[strlen("nolip ")];
4055 	}
4056 
4057 	if (!isdigit(val_buf[0]))
4058 		return -EINVAL;
4059 	if (sscanf(val_buf, "%i", &val) != 1)
4060 		return -EINVAL;
4061 
4062 	if (val >= 0 && val <= 6) {
4063 		prev_val = phba->cfg_topology;
4064 		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4065 			val == 4) {
4066 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4067 				"3113 Loop mode not supported at speed %d\n",
4068 				val);
4069 			return -EINVAL;
4070 		}
4071 		/*
4072 		 * The 'topology' is not a configurable parameter if :
4073 		 *   - persistent topology enabled
4074 		 *   - G7/G6 with no private loop support
4075 		 */
4076 
4077 		if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4078 		     (!phba->sli4_hba.pc_sli4_params.pls &&
4079 		     (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4080 		     phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
4081 		    val == 4) {
4082 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4083 				"3114 Loop mode not supported\n");
4084 			return -EINVAL;
4085 		}
4086 		phba->cfg_topology = val;
4087 		if (nolip)
4088 			return strlen(buf);
4089 
4090 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4091 			"3054 lpfc_topology changed from %d to %d\n",
4092 			prev_val, val);
4093 		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4094 			phba->fc_topology_changed = 1;
4095 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4096 		if (err) {
4097 			phba->cfg_topology = prev_val;
4098 			return -EINVAL;
4099 		} else
4100 			return strlen(buf);
4101 	}
4102 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4103 		"%d:0467 lpfc_topology attribute cannot be set to %d, "
4104 		"allowed range is [0, 6]\n",
4105 		phba->brd_no, val);
4106 	return -EINVAL;
4107 }
4108 
4109 lpfc_param_show(topology)
4110 static DEVICE_ATTR_RW(lpfc_topology);
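/*
 * Illustrative usage sketch (the host number is an example only): select
 * point-to-point only topology.  Writing a plain value triggers a LIP;
 * prefixing the value with "nolip " stores it without issuing a LIP, as
 * handled in lpfc_topology_store() above.
 *
 *   echo 2 > /sys/class/scsi_host/host6/lpfc_topology
 *   echo "nolip 2" > /sys/class/scsi_host/host6/lpfc_topology
 */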
4111 
4112 /**
4113  * lpfc_static_vport_show: Read callback function for
4114  *   lpfc_static_vport sysfs file.
4115  * @dev: Pointer to class device object.
4116  * @attr: device attribute structure.
4117  * @buf: Data buffer.
4118  *
4119  * This function is the read call back function for
4120  * lpfc_static_vport sysfs file. The lpfc_static_vport
4121  * sysfs file reports the manageability of the vport.
4122  **/
4123 static ssize_t
4124 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4125 			 char *buf)
4126 {
4127 	struct Scsi_Host  *shost = class_to_shost(dev);
4128 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4129 	if (vport->vport_flag & STATIC_VPORT)
4130 		sprintf(buf, "1\n");
4131 	else
4132 		sprintf(buf, "0\n");
4133 
4134 	return strlen(buf);
4135 }
4136 
4137 /*
4138  * Sysfs attribute to report whether the vport is a static vport.
4139  */
4140 static DEVICE_ATTR_RO(lpfc_static_vport);
4141 
4142 /**
4143  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4144  * @dev: Pointer to class device.
 * @attr: device attribute, not used.
4145  * @buf: Data buffer.
4146  * @count: Size of the data buffer.
4147  *
4148  * This function gets called when a user writes to the lpfc_stat_data_ctrl
4149  * sysfs file. It parses the command written to the sysfs file and takes
4150  * the appropriate action. These commands are used for controlling driver
4151  * statistical data collection (an illustrative usage example follows the
4152  * function body). The following commands are handled:
4153  *
4154  *    setbucket <bucket_type> <base> <step>
4155  *			       = Set the latency buckets.
4156  *    destroybucket            = destroy all the buckets.
4157  *    start                    = start data collection
4158  *    stop                     = stop data collection
4159  *    reset                    = reset the collected data
4160  **/
4161 static ssize_t
4162 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4163 			  const char *buf, size_t count)
4164 {
4165 	struct Scsi_Host  *shost = class_to_shost(dev);
4166 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4167 	struct lpfc_hba   *phba = vport->phba;
4168 #define LPFC_MAX_DATA_CTRL_LEN 1024
4169 	static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4170 	unsigned long i;
4171 	char *str_ptr, *token;
4172 	struct lpfc_vport **vports;
4173 	struct Scsi_Host *v_shost;
4174 	char *bucket_type_str, *base_str, *step_str;
4175 	unsigned long base, step, bucket_type;
4176 
4177 	if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4178 		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4179 			return -EINVAL;
4180 
4181 		strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4182 		str_ptr = &bucket_data[0];
4183 		/* Ignore this token - this is command token */
4184 		token = strsep(&str_ptr, "\t ");
4185 		if (!token)
4186 			return -EINVAL;
4187 
4188 		bucket_type_str = strsep(&str_ptr, "\t ");
4189 		if (!bucket_type_str)
4190 			return -EINVAL;
4191 
4192 		if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4193 			bucket_type = LPFC_LINEAR_BUCKET;
4194 		else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4195 			bucket_type = LPFC_POWER2_BUCKET;
4196 		else
4197 			return -EINVAL;
4198 
4199 		base_str = strsep(&str_ptr, "\t ");
4200 		if (!base_str)
4201 			return -EINVAL;
4202 		base = simple_strtoul(base_str, NULL, 0);
4203 
4204 		step_str = strsep(&str_ptr, "\t ");
4205 		if (!step_str)
4206 			return -EINVAL;
4207 		step = simple_strtoul(step_str, NULL, 0);
4208 		if (!step)
4209 			return -EINVAL;
4210 
4211 		/* Block the data collection for every vport */
4212 		vports = lpfc_create_vport_work_array(phba);
4213 		if (vports == NULL)
4214 			return -ENOMEM;
4215 
4216 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4217 			v_shost = lpfc_shost_from_vport(vports[i]);
4218 			spin_lock_irq(v_shost->host_lock);
4219 			/* Block and reset data collection */
4220 			vports[i]->stat_data_blocked = 1;
4221 			if (vports[i]->stat_data_enabled)
4222 				lpfc_vport_reset_stat_data(vports[i]);
4223 			spin_unlock_irq(v_shost->host_lock);
4224 		}
4225 
4226 		/* Set the bucket attributes */
4227 		phba->bucket_type = bucket_type;
4228 		phba->bucket_base = base;
4229 		phba->bucket_step = step;
4230 
4231 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4232 			v_shost = lpfc_shost_from_vport(vports[i]);
4233 
4234 			/* Unblock data collection */
4235 			spin_lock_irq(v_shost->host_lock);
4236 			vports[i]->stat_data_blocked = 0;
4237 			spin_unlock_irq(v_shost->host_lock);
4238 		}
4239 		lpfc_destroy_vport_work_array(phba, vports);
4240 		return strlen(buf);
4241 	}
4242 
4243 	if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4244 		vports = lpfc_create_vport_work_array(phba);
4245 		if (vports == NULL)
4246 			return -ENOMEM;
4247 
4248 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4249 			v_shost = lpfc_shost_from_vport(vports[i]);
4250 			spin_lock_irq(v_shost->host_lock);
4251 			vports[i]->stat_data_blocked = 1;
4252 			lpfc_free_bucket(vports[i]);
4253 			vports[i]->stat_data_enabled = 0;
4254 			vports[i]->stat_data_blocked = 0;
4255 			spin_unlock_irq(v_shost->host_lock);
4256 		}
4257 		lpfc_destroy_vport_work_array(phba, vports);
4258 		phba->bucket_type = LPFC_NO_BUCKET;
4259 		phba->bucket_base = 0;
4260 		phba->bucket_step = 0;
4261 		return strlen(buf);
4262 	}
4263 
4264 	if (!strncmp(buf, "start", strlen("start"))) {
4265 		/* If no buckets configured return error */
4266 		if (phba->bucket_type == LPFC_NO_BUCKET)
4267 			return -EINVAL;
4268 		spin_lock_irq(shost->host_lock);
4269 		if (vport->stat_data_enabled) {
4270 			spin_unlock_irq(shost->host_lock);
4271 			return strlen(buf);
4272 		}
4273 		lpfc_alloc_bucket(vport);
4274 		vport->stat_data_enabled = 1;
4275 		spin_unlock_irq(shost->host_lock);
4276 		return strlen(buf);
4277 	}
4278 
4279 	if (!strncmp(buf, "stop", strlen("stop"))) {
4280 		spin_lock_irq(shost->host_lock);
4281 		if (vport->stat_data_enabled == 0) {
4282 			spin_unlock_irq(shost->host_lock);
4283 			return strlen(buf);
4284 		}
4285 		lpfc_free_bucket(vport);
4286 		vport->stat_data_enabled = 0;
4287 		spin_unlock_irq(shost->host_lock);
4288 		return strlen(buf);
4289 	}
4290 
4291 	if (!strncmp(buf, "reset", strlen("reset"))) {
4292 		if ((phba->bucket_type == LPFC_NO_BUCKET)
4293 			|| !vport->stat_data_enabled)
4294 			return strlen(buf);
4295 		spin_lock_irq(shost->host_lock);
4296 		vport->stat_data_blocked = 1;
4297 		lpfc_vport_reset_stat_data(vport);
4298 		vport->stat_data_blocked = 0;
4299 		spin_unlock_irq(shost->host_lock);
4300 		return strlen(buf);
4301 	}
4302 	return -EINVAL;
4303 }
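/*
 * Illustrative usage sketch for lpfc_stat_data_ctrl (the host number and
 * bucket parameters are examples only): configure linear latency buckets
 * with base 100 and step 50, start collection, then read back the current
 * collection state.
 *
 *   echo "setbucket linear 100 50" > /sys/class/scsi_host/host6/lpfc_stat_data_ctrl
 *   echo "start" > /sys/class/scsi_host/host6/lpfc_stat_data_ctrl
 *   cat /sys/class/scsi_host/host6/lpfc_stat_data_ctrl
 */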
4304 
4305 
4306 /**
4307  * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4308  * @dev: Pointer to class device object.
4309  * @buf: Data buffer.
4310  *
4311  * This function is the read call back function for
4312  * lpfc_stat_data_ctrl sysfs file. This function report the
4313  * current statistical data collection state.
4314  **/
4315 static ssize_t
4316 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4317 			 char *buf)
4318 {
4319 	struct Scsi_Host  *shost = class_to_shost(dev);
4320 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4321 	struct lpfc_hba   *phba = vport->phba;
4322 	int index = 0;
4323 	int i;
4324 	char *bucket_type;
4325 	unsigned long bucket_value;
4326 
4327 	switch (phba->bucket_type) {
4328 	case LPFC_LINEAR_BUCKET:
4329 		bucket_type = "linear";
4330 		break;
4331 	case LPFC_POWER2_BUCKET:
4332 		bucket_type = "power2";
4333 		break;
4334 	default:
4335 		bucket_type = "No Bucket";
4336 		break;
4337 	}
4338 
4339 	sprintf(&buf[index], "Statistical Data enabled :%d, "
4340 		"blocked :%d, Bucket type :%s, Bucket base :%d,"
4341 		" Bucket step :%d\nLatency Ranges :",
4342 		vport->stat_data_enabled, vport->stat_data_blocked,
4343 		bucket_type, phba->bucket_base, phba->bucket_step);
4344 	index = strlen(buf);
4345 	if (phba->bucket_type != LPFC_NO_BUCKET) {
4346 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4347 			if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4348 				bucket_value = phba->bucket_base +
4349 					phba->bucket_step * i;
4350 			else
4351 				bucket_value = phba->bucket_base +
4352 				(1 << i) * phba->bucket_step;
4353 
4354 			if (index + 10 > PAGE_SIZE)
4355 				break;
4356 			sprintf(&buf[index], "%08ld ", bucket_value);
4357 			index = strlen(buf);
4358 		}
4359 	}
4360 	sprintf(&buf[index], "\n");
4361 	return strlen(buf);
4362 }
4363 
4364 /*
4365  * Sysfs attribute to control the statistical data collection.
4366  */
4367 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
4368 
4369 /*
4370  * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4371  */
4372 
4373 /*
4374  * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
4375  * for each target.
4376  */
4377 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4378 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4379 	STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4380 
4381 
4382 /**
4383  * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4384  * @filp: sysfs file
4385  * @kobj: Pointer to the kernel object
4386  * @bin_attr: Attribute object
4387  * @buf: Buffer pointer
4388  * @off: File offset
4389  * @count: Buffer size
4390  *
4391  * This function is the read call back function for lpfc_drvr_stat_data
4392  * sysfs file. This function export the statistical data to user
4393  * applications.
4394  **/
4395 static ssize_t
4396 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4397 		struct bin_attribute *bin_attr,
4398 		char *buf, loff_t off, size_t count)
4399 {
4400 	struct device *dev = container_of(kobj, struct device,
4401 		kobj);
4402 	struct Scsi_Host  *shost = class_to_shost(dev);
4403 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4404 	struct lpfc_hba   *phba = vport->phba;
4405 	int i = 0, index = 0;
4406 	unsigned long nport_index;
4407 	struct lpfc_nodelist *ndlp = NULL;
4408 	nport_index = (unsigned long)off /
4409 		MAX_STAT_DATA_SIZE_PER_TARGET;
4410 
4411 	if (!vport->stat_data_enabled || vport->stat_data_blocked
4412 		|| (phba->bucket_type == LPFC_NO_BUCKET))
4413 		return 0;
4414 
4415 	spin_lock_irq(shost->host_lock);
4416 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4417 		if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
4418 			continue;
4419 
4420 		if (nport_index > 0) {
4421 			nport_index--;
4422 			continue;
4423 		}
4424 
4425 		if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4426 			> count)
4427 			break;
4428 
4429 		if (!ndlp->lat_data)
4430 			continue;
4431 
4432 		/* Print the WWN */
4433 		sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4434 			ndlp->nlp_portname.u.wwn[0],
4435 			ndlp->nlp_portname.u.wwn[1],
4436 			ndlp->nlp_portname.u.wwn[2],
4437 			ndlp->nlp_portname.u.wwn[3],
4438 			ndlp->nlp_portname.u.wwn[4],
4439 			ndlp->nlp_portname.u.wwn[5],
4440 			ndlp->nlp_portname.u.wwn[6],
4441 			ndlp->nlp_portname.u.wwn[7]);
4442 
4443 		index = strlen(buf);
4444 
4445 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4446 			sprintf(&buf[index], "%010u,",
4447 				ndlp->lat_data[i].cmd_count);
4448 			index = strlen(buf);
4449 		}
4450 		sprintf(&buf[index], "\n");
4451 		index = strlen(buf);
4452 	}
4453 	spin_unlock_irq(shost->host_lock);
4454 	return index;
4455 }
4456 
4457 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4458 	.attr = {
4459 		.name = "lpfc_drvr_stat_data",
4460 		.mode = S_IRUSR,
4461 	},
4462 	.size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4463 	.read = sysfs_drvr_stat_data_read,
4464 	.write = NULL,
4465 };
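/*
 * Illustrative read of the per-target latency data exported by the binary
 * attribute above (a sketch; the exact sysfs path depends on where the
 * driver registers this bin attribute, and the host number is an example
 * only).  Each output line is a target WWN followed by one command count
 * per bucket, as formatted by sysfs_drvr_stat_data_read().
 *
 *   cat /sys/class/scsi_host/host6/lpfc_drvr_stat_data
 */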
4466 
4467 /*
4468 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4469 # connection.
4470 # Value range is [0,16]. Default value is 0.
4471 */
4472 /**
4473  * lpfc_link_speed_store - Set the adapter's link speed
4474  * @dev: class device that is converted into a Scsi_host.
4475  * @attr: device attribute, not used.
 * @buf: buffer containing the new link speed value.
 * @count: size of the data buffer.
4476  *
4477  * Description:
4478  * If val is in a valid range then set the adapter's link speed field and
4479  * issue a lip; if the lip fails reset the link speed to the old value.
4480  *
4481  * Notes:
4482  * If the value is not in range log a kernel error message and return an error.
4483  *
4484  * Returns:
4485  * zero if val is in range and lip okay.
4486  * non-zero return value from lpfc_issue_lip()
4487  * -EINVAL val out of range
4488  **/
4489 static ssize_t
4490 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4491 		const char *buf, size_t count)
4492 {
4493 	struct Scsi_Host  *shost = class_to_shost(dev);
4494 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4495 	struct lpfc_hba   *phba = vport->phba;
4496 	int val = LPFC_USER_LINK_SPEED_AUTO;
4497 	int nolip = 0;
4498 	const char *val_buf = buf;
4499 	int err;
4500 	uint32_t prev_val, if_type;
4501 
4502 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4503 	if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4504 	    phba->hba_flag & HBA_FORCED_LINK_SPEED)
4505 		return -EPERM;
4506 
4507 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4508 		nolip = 1;
4509 		val_buf = &buf[strlen("nolip ")];
4510 	}
4511 
4512 	if (!isdigit(val_buf[0]))
4513 		return -EINVAL;
4514 	if (sscanf(val_buf, "%i", &val) != 1)
4515 		return -EINVAL;
4516 
4517 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4518 		"3055 lpfc_link_speed changed from %d to %d %s\n",
4519 		phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4520 
4521 	if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4522 	    ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4523 	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4524 	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4525 	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4526 	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4527 	    ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4528 	    ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4529 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4530 				"2879 lpfc_link_speed attribute cannot be set "
4531 				"to %d. Speed is not supported by this port.\n",
4532 				val);
4533 		return -EINVAL;
4534 	}
4535 	if (val >= LPFC_USER_LINK_SPEED_16G &&
4536 	    phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4537 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4538 				"3112 lpfc_link_speed attribute cannot be set "
4539 				"to %d. Speed is not supported in loop mode.\n",
4540 				val);
4541 		return -EINVAL;
4542 	}
4543 
4544 	switch (val) {
4545 	case LPFC_USER_LINK_SPEED_AUTO:
4546 	case LPFC_USER_LINK_SPEED_1G:
4547 	case LPFC_USER_LINK_SPEED_2G:
4548 	case LPFC_USER_LINK_SPEED_4G:
4549 	case LPFC_USER_LINK_SPEED_8G:
4550 	case LPFC_USER_LINK_SPEED_16G:
4551 	case LPFC_USER_LINK_SPEED_32G:
4552 	case LPFC_USER_LINK_SPEED_64G:
4553 		prev_val = phba->cfg_link_speed;
4554 		phba->cfg_link_speed = val;
4555 		if (nolip)
4556 			return strlen(buf);
4557 
4558 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4559 		if (err) {
4560 			phba->cfg_link_speed = prev_val;
4561 			return -EINVAL;
4562 		}
4563 		return strlen(buf);
4564 	default:
4565 		break;
4566 	}
4567 
4568 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4569 			"0469 lpfc_link_speed attribute cannot be set to %d, "
4570 			"allowed values are [%s]\n",
4571 			val, LPFC_LINK_SPEED_STRING);
4572 	return -EINVAL;
4573 
4574 }
4575 
4576 static int lpfc_link_speed = 0;
4577 module_param(lpfc_link_speed, int, S_IRUGO);
4578 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4579 lpfc_param_show(link_speed)
4580 
4581 /**
4582  * lpfc_link_speed_init - Set the adapters link speed
4583  * @phba: lpfc_hba pointer.
4584  * @val: link speed value.
4585  *
4586  * Description:
4587  * If val is in a valid range then set the adapter's link speed field.
4588  *
4589  * Notes:
4590  * If the value is not in range log a kernel error message, clear the link
4591  * speed and return an error.
4592  *
4593  * Returns:
4594  * zero if val saved.
4595  * -EINVAL val out of range
4596  **/
4597 static int
4598 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4599 {
4600 	if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4601 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4602 			"3111 lpfc_link_speed of %d cannot "
4603 			"support loop mode, setting topology to default.\n",
4604 			 val);
4605 		phba->cfg_topology = 0;
4606 	}
4607 
4608 	switch (val) {
4609 	case LPFC_USER_LINK_SPEED_AUTO:
4610 	case LPFC_USER_LINK_SPEED_1G:
4611 	case LPFC_USER_LINK_SPEED_2G:
4612 	case LPFC_USER_LINK_SPEED_4G:
4613 	case LPFC_USER_LINK_SPEED_8G:
4614 	case LPFC_USER_LINK_SPEED_16G:
4615 	case LPFC_USER_LINK_SPEED_32G:
4616 	case LPFC_USER_LINK_SPEED_64G:
4617 		phba->cfg_link_speed = val;
4618 		return 0;
4619 	default:
4620 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4621 				"0405 lpfc_link_speed attribute cannot "
4622 				"be set to %d, allowed values are "
4623 				"["LPFC_LINK_SPEED_STRING"]\n", val);
4624 		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4625 		return -EINVAL;
4626 	}
4627 }
4628 
4629 static DEVICE_ATTR_RW(lpfc_link_speed);
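/*
 * Illustrative usage sketch (the host number is an example only): force the
 * link to 16 Gb/s and issue a LIP, or store the value without a LIP by using
 * the "nolip " prefix handled in lpfc_link_speed_store() above.
 *
 *   echo 16 > /sys/class/scsi_host/host6/lpfc_link_speed
 *   echo "nolip 16" > /sys/class/scsi_host/host6/lpfc_link_speed
 */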
4630 
4631 /*
4632 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4633 #       0  = aer disabled or not supported
4634 #       1  = aer supported and enabled (default)
4635 # Value range is [0,1]. Default value is 1.
4636 */
4637 LPFC_ATTR(aer_support, 1, 0, 1,
4638 	"Enable PCIe device AER support");
4639 lpfc_param_show(aer_support)
4640 
4641 /**
4642  * lpfc_aer_support_store - Set the adapter for aer support
4643  *
4644  * @dev: class device that is converted into a Scsi_host.
4645  * @attr: device attribute, not used.
4646  * @buf: containing enable or disable aer flag.
4647  * @count: unused variable.
4648  *
4649  * Description:
4650  * If the val is 1 and currently the device's AER capability was not
4651  * enabled, invoke the kernel's enable AER helper routine, trying to
4652  * enable the device's AER capability. If the helper routine enabling
4653  * AER returns success, update the device's cfg_aer_support flag to
4654  * indicate AER is supported by the device; otherwise, if the device
4655  * AER capability is already enabled to support AER, then do nothing.
4656  *
4657  * If the val is 0 and currently the device's AER support was enabled,
4658  * invoke the kernel's disable AER helper routine. After that, update
4659  * the device's cfg_aer_support flag to indicate AER is not supported
4660  * by the device; otherwise, if the device AER capability is already
4661  * disabled from supporting AER, then do nothing.
4662  *
4663  * Returns:
4664  * length of the buf on success if val is in range the intended mode
4665  * is supported.
4666  * -EINVAL if val out of range or intended mode is not supported.
4667  **/
4668 static ssize_t
4669 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4670 		       const char *buf, size_t count)
4671 {
4672 	struct Scsi_Host *shost = class_to_shost(dev);
4673 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4674 	struct lpfc_hba *phba = vport->phba;
4675 	int val = 0, rc = -EINVAL;
4676 
4677 	if (!isdigit(buf[0]))
4678 		return -EINVAL;
4679 	if (sscanf(buf, "%i", &val) != 1)
4680 		return -EINVAL;
4681 
4682 	switch (val) {
4683 	case 0:
4684 		if (phba->hba_flag & HBA_AER_ENABLED) {
4685 			rc = pci_disable_pcie_error_reporting(phba->pcidev);
4686 			if (!rc) {
4687 				spin_lock_irq(&phba->hbalock);
4688 				phba->hba_flag &= ~HBA_AER_ENABLED;
4689 				spin_unlock_irq(&phba->hbalock);
4690 				phba->cfg_aer_support = 0;
4691 				rc = strlen(buf);
4692 			} else
4693 				rc = -EPERM;
4694 		} else {
4695 			phba->cfg_aer_support = 0;
4696 			rc = strlen(buf);
4697 		}
4698 		break;
4699 	case 1:
4700 		if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4701 			rc = pci_enable_pcie_error_reporting(phba->pcidev);
4702 			if (!rc) {
4703 				spin_lock_irq(&phba->hbalock);
4704 				phba->hba_flag |= HBA_AER_ENABLED;
4705 				spin_unlock_irq(&phba->hbalock);
4706 				phba->cfg_aer_support = 1;
4707 				rc = strlen(buf);
4708 			} else
4709 				 rc = -EPERM;
4710 		} else {
4711 			phba->cfg_aer_support = 1;
4712 			rc = strlen(buf);
4713 		}
4714 		break;
4715 	default:
4716 		rc = -EINVAL;
4717 		break;
4718 	}
4719 	return rc;
4720 }
4721 
4722 static DEVICE_ATTR_RW(lpfc_aer_support);
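/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 *
 *     echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_support    enable AER
 *     echo 0 > /sys/class/scsi_host/hostN/lpfc_aer_support    disable AER
 *
 * A write only updates hba_flag and cfg_aer_support when the corresponding
 * pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() call
 * succeeds; otherwise the store routine above returns -EPERM.
 */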
4723 
4724 /**
4725  * lpfc_aer_cleanup_state - Clean up AER state on an AER-enabled device
4726  * @dev: class device that is converted into a Scsi_host.
4727  * @attr: device attribute, not used.
4728  * @buf: containing flag 1 for aer cleanup state.
4729  * @count: unused variable.
4730  *
4731  * Description:
4732  * If the @buf contains 1 and the device currently has the AER support
4733  * enabled, then invokes the kernel AER helper routine
4734  * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4735  * error status register.
4736  *
4737  * Notes:
4738  *
4739  * Returns:
4740  * length of the buf on success; -EPERM if clearing the status fails;
4741  * -EINVAL if the buf does not contain 1 or AER support is not enabled.
4742  **/
4743 static ssize_t
4744 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4745 		       const char *buf, size_t count)
4746 {
4747 	struct Scsi_Host  *shost = class_to_shost(dev);
4748 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4749 	struct lpfc_hba   *phba = vport->phba;
4750 	int val, rc = -1;
4751 
4752 	if (!isdigit(buf[0]))
4753 		return -EINVAL;
4754 	if (sscanf(buf, "%i", &val) != 1)
4755 		return -EINVAL;
4756 	if (val != 1)
4757 		return -EINVAL;
4758 
4759 	if (phba->hba_flag & HBA_AER_ENABLED)
4760 		rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4761 
4762 	if (rc == 0)
4763 		return strlen(buf);
4764 	else
4765 		return -EPERM;
4766 }
4767 
4768 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4769 		   lpfc_aer_cleanup_state);
4770 
4771 /**
4772  * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4773  *
4774  * @dev: class device that is converted into a Scsi_host.
4775  * @attr: device attribute, not used.
4776  * @buf: containing the string with the number of vfs to be enabled.
4777  * @count: unused variable.
4778  *
4779  * Description:
4780  * When this API is called through user sysfs, the driver shall
4781  * try to enable or disable SR-IOV virtual functions according to the
4782  * following:
4783  *
4784  * If no virtual functions have been enabled on the physical function,
4785  * the driver shall invoke the pci enable virtual function api trying
4786  * to enable the virtual functions. If the nr_vfn provided is greater
4787  * than the maximum supported, the maximum virtual function number will
4788  * be used for invoking the api; otherwise, the nr_vfn provided shall
4789  * be used for invoking the api. If the api call returned success, the
4790  * actual number of virtual functions enabled will be set to the driver
4791  * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4792  * cfg_sriov_nr_virtfn remains zero.
4793  *
4794  * If non-zero virtual functions have already been enabled on the
4795  * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4796  * -EINVAL will be returned and the driver does nothing;
4797  *
4798  * If the nr_vfn provided is zero and non-zero virtual functions have
4799  * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4800  * disable virtual function API shall be invoked to disable all the
4801  * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4802  * zero. Otherwise, if zero virtual function has been enabled, do
4803  * nothing.
4804  *
4805  * Returns:
4806  * length of the buf on success if val is in range and the intended mode
4807  * is supported.
4808  * -EINVAL if val out of range or intended mode is not supported.
4809  **/
4810 static ssize_t
4811 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4812 			 const char *buf, size_t count)
4813 {
4814 	struct Scsi_Host *shost = class_to_shost(dev);
4815 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4816 	struct lpfc_hba *phba = vport->phba;
4817 	struct pci_dev *pdev = phba->pcidev;
4818 	int val = 0, rc = -EINVAL;
4819 
4820 	/* Sanity check on user data */
4821 	if (!isdigit(buf[0]))
4822 		return -EINVAL;
4823 	if (sscanf(buf, "%i", &val) != 1)
4824 		return -EINVAL;
4825 	if (val < 0)
4826 		return -EINVAL;
4827 
4828 	/* Request disabling virtual functions */
4829 	if (val == 0) {
4830 		if (phba->cfg_sriov_nr_virtfn > 0) {
4831 			pci_disable_sriov(pdev);
4832 			phba->cfg_sriov_nr_virtfn = 0;
4833 		}
4834 		return strlen(buf);
4835 	}
4836 
4837 	/* Request enabling virtual functions */
4838 	if (phba->cfg_sriov_nr_virtfn > 0) {
4839 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4840 				"3018 There are %d virtual functions "
4841 				"enabled on physical function.\n",
4842 				phba->cfg_sriov_nr_virtfn);
4843 		return -EEXIST;
4844 	}
4845 
4846 	if (val <= LPFC_MAX_VFN_PER_PFN)
4847 		phba->cfg_sriov_nr_virtfn = val;
4848 	else {
4849 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4850 				"3019 Enabling %d virtual functions is not "
4851 				"allowed.\n", val);
4852 		return -EINVAL;
4853 	}
4854 
4855 	rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4856 	if (rc) {
4857 		phba->cfg_sriov_nr_virtfn = 0;
4858 		rc = -EPERM;
4859 	} else
4860 		rc = strlen(buf);
4861 
4862 	return rc;
4863 }
4864 
4865 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4866 	"Enable PCIe device SR-IOV virtual fn");
4867 
4868 lpfc_param_show(sriov_nr_virtfn)
4869 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
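/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 *
 *     echo 4 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn   enable 4 VFs
 *     echo 0 > /sys/class/scsi_host/hostN/lpfc_sriov_nr_virtfn   disable VFs
 *
 * Per the store routine above, enabling VFs while some are already enabled
 * returns -EEXIST, and a request above LPFC_MAX_VFN_PER_PFN returns -EINVAL.
 */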
4870 
4871 /**
4872  * lpfc_request_firmware_upgrade_store - Request a Linux generic firmware upgrade
4873  *
4874  * @dev: class device that is converted into a Scsi_host.
4875  * @attr: device attribute, not used.
4876  * @buf: containing the string "1" to request a firmware upgrade.
4877  * @count: unused variable.
4878  *
4879  * Description:
4880  * Writing 1 requests a Linux generic firmware upgrade on the adapter.
4881  * Returns:
4882  * length of the buf on success if val is in range and the intended mode
4883  * is supported.
4884  * -EINVAL if val out of range or intended mode is not supported.
4885  **/
4886 static ssize_t
4887 lpfc_request_firmware_upgrade_store(struct device *dev,
4888 				    struct device_attribute *attr,
4889 				    const char *buf, size_t count)
4890 {
4891 	struct Scsi_Host *shost = class_to_shost(dev);
4892 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4893 	struct lpfc_hba *phba = vport->phba;
4894 	int val = 0, rc;
4895 
4896 	/* Sanity check on user data */
4897 	if (!isdigit(buf[0]))
4898 		return -EINVAL;
4899 	if (sscanf(buf, "%i", &val) != 1)
4900 		return -EINVAL;
4901 	if (val != 1)
4902 		return -EINVAL;
4903 
4904 	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4905 	if (rc)
4906 		rc = -EPERM;
4907 	else
4908 		rc = strlen(buf);
4909 	return rc;
4910 }
4911 
4912 static int lpfc_req_fw_upgrade;
4913 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4914 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4915 lpfc_param_show(request_firmware_upgrade)
4916 
4917 /**
4918  * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4919  * @phba: lpfc_hba pointer.
4920  * @val: 0 or 1.
4921  *
4922  * Description:
4923  * Set the initial Linux generic firmware upgrade enable or disable flag.
4924  *
4925  * Returns:
4926  * zero if val saved.
4927  * -EINVAL val out of range
4928  **/
4929 static int
4930 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4931 {
4932 	if (val >= 0 && val <= 1) {
4933 		phba->cfg_request_firmware_upgrade = val;
4934 		return 0;
4935 	}
4936 	return -EINVAL;
4937 }
4938 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4939 		   lpfc_request_firmware_upgrade_show,
4940 		   lpfc_request_firmware_upgrade_store);
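/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 *
 *     echo 1 > /sys/class/scsi_host/hostN/lpfc_req_fw_upgrade
 *
 * Any value other than 1 is rejected with -EINVAL; -EPERM is returned if
 * lpfc_sli4_request_firmware_update() fails.
 */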
4941 
4942 /**
4943  * lpfc_force_rscn_store - Force an RSCN to be sent to remote NPorts
4944  *
4945  * @dev: class device that is converted into a Scsi_host.
4946  * @attr: device attribute, not used.
4947  * @buf: unused string
4948  * @count: unused variable.
4949  *
4950  * Description:
4951  * Force the switch to send a RSCN to all other NPorts in our zone
4952  * If we are direct connect pt2pt, build the RSCN command ourself
4953  * and send to the other NPort. Not supported for private loop.
4954  *
4955  * Returns:
4956  * length of the buf - on success
4957  * -EIO            - if the command is not sent
4958  **/
4959 static ssize_t
4960 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4961 		      const char *buf, size_t count)
4962 {
4963 	struct Scsi_Host *shost = class_to_shost(dev);
4964 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4965 	int i;
4966 
4967 	i = lpfc_issue_els_rscn(vport, 0);
4968 	if (i)
4969 		return -EIO;
4970 	return strlen(buf);
4971 }
4972 
4973 /*
4974  * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4975  * connected to  the HBA.
4976  *
4977  * Value range is any ASCII value.
4978  */
4979 static int lpfc_force_rscn;
4980 module_param(lpfc_force_rscn, int, 0644);
4981 MODULE_PARM_DESC(lpfc_force_rscn,
4982 		 "Force an RSCN to be sent to all remote NPorts");
4983 lpfc_param_show(force_rscn)
4984 
4985 /**
4986  * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4987  * @phba: lpfc_hba pointer.
4988  * @val: unused value.
4989  *
4990  * Returns:
4991  * zero if val saved.
4992  **/
4993 static int
4994 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4995 {
4996 	return 0;
4997 }
4998 static DEVICE_ATTR_RW(lpfc_force_rscn);
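/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 * any write triggers lpfc_issue_els_rscn(), e.g.
 *
 *     echo 1 > /sys/class/scsi_host/hostN/lpfc_force_rscn
 *
 * The write returns -EIO if the RSCN could not be issued.
 */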
4999 
5000 /**
5001  * lpfc_fcp_imax_store - Set the maximum FCP interrupt rate
5002  *
5003  * @dev: class device that is converted into a Scsi_host.
5004  * @attr: device attribute, not used.
5005  * @buf: string with the number of fast-path FCP interrupts per second.
5006  * @count: unused variable.
5007  *
5008  * Description:
5009  * If val is 0 or in the valid range [5000,5000000], then set the adapter's
5010  * maximum number of fast-path FCP interrupts per second.
5011  *
5012  * Returns:
5013  * length of the buf on success if val is in range and the intended mode
5014  * is supported.
5015  * -EINVAL if val out of range or intended mode is not supported.
5016  **/
5017 static ssize_t
5018 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5019 			 const char *buf, size_t count)
5020 {
5021 	struct Scsi_Host *shost = class_to_shost(dev);
5022 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5023 	struct lpfc_hba *phba = vport->phba;
5024 	struct lpfc_eq_intr_info *eqi;
5025 	uint32_t usdelay;
5026 	int val = 0, i;
5027 
5028 	/* fcp_imax is only valid for SLI4 */
5029 	if (phba->sli_rev != LPFC_SLI_REV4)
5030 		return -EINVAL;
5031 
5032 	/* Sanity check on user data */
5033 	if (!isdigit(buf[0]))
5034 		return -EINVAL;
5035 	if (sscanf(buf, "%i", &val) != 1)
5036 		return -EINVAL;
5037 
5038 	/*
5039 	 * Value range for the HBA is [5000,5000000]
5040 	 * The value for each EQ depends on how many EQs are configured.
5041 	 * Allow value == 0
5042 	 */
5043 	if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5044 		return -EINVAL;
5045 
5046 	phba->cfg_auto_imax = (val) ? 0 : 1;
5047 	if (phba->cfg_fcp_imax && !val) {
5048 		queue_delayed_work(phba->wq, &phba->eq_delay_work,
5049 				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5050 
5051 		for_each_present_cpu(i) {
5052 			eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5053 			eqi->icnt = 0;
5054 		}
5055 	}
5056 
5057 	phba->cfg_fcp_imax = (uint32_t)val;
5058 
5059 	if (phba->cfg_fcp_imax)
5060 		usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5061 	else
5062 		usdelay = 0;
5063 
5064 	for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5065 		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5066 					 usdelay);
5067 
5068 	return strlen(buf);
5069 }
5070 
5071 /*
5072 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5073 # for the HBA.
5074 #
5075 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5076 */
5077 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5078 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5079 MODULE_PARM_DESC(lpfc_fcp_imax,
5080 	    "Set the maximum number of FCP interrupts per second per HBA");
5081 lpfc_param_show(fcp_imax)
5082 
5083 /**
5084  * lpfc_fcp_imax_init - Set the initial maximum FCP interrupt rate
5085  * @phba: lpfc_hba pointer.
5086  * @val: maximum number of fast-path FCP interrupts per second.
5087  *
5088  * Description:
5089  * If val is 0 or in the valid range [5000,5000000], then initialize the
5090  * adapter's maximum number of fast-path FCP interrupts per second.
5091  *
5092  * Returns:
5093  * zero if val saved.
5094  * -EINVAL val out of range
5095  **/
5096 static int
5097 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5098 {
5099 	if (phba->sli_rev != LPFC_SLI_REV4) {
5100 		phba->cfg_fcp_imax = 0;
5101 		return 0;
5102 	}
5103 
5104 	if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5105 	    (val == 0)) {
5106 		phba->cfg_fcp_imax = val;
5107 		return 0;
5108 	}
5109 
5110 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5111 			"3016 lpfc_fcp_imax: %d out of range, using default\n",
5112 			val);
5113 	phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5114 
5115 	return 0;
5116 }
5117 
5118 static DEVICE_ATTR_RW(lpfc_fcp_imax);
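/*
 * Worked example (editor's sketch, assuming LPFC_SEC_TO_USEC is 1000000):
 * with the default lpfc_fcp_imax of 50000 interrupts/sec, the store routine
 * above programs an EQ delay of
 *
 *     usdelay = 1000000 / 50000 = 20 microseconds
 *
 * on each group of LPFC_MAX_EQ_DELAY_EQID_CNT EQs. Writing 0 re-enables the
 * driver's automatic EQ delay management (cfg_auto_imax).
 */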
5119 
5120 /**
5121  * lpfc_cq_max_proc_limit_store - Set the per-CQ CQE processing limit
5122  *
5123  * @dev: class device that is converted into a Scsi_host.
5124  * @attr: device attribute, not used.
5125  * @buf: string with the cq max processing limit of cqes
5126  * @count: unused variable.
5127  *
5128  * Description:
5129  * If val is in a valid range, then set value on each cq
5130  *
5131  * Returns:
5132  * The length of the buf: if successful
5133  * -ERANGE: if val is not in the valid range
5134  * -EINVAL: if bad value format or intended mode is not supported.
5135  **/
5136 static ssize_t
5137 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5138 			 const char *buf, size_t count)
5139 {
5140 	struct Scsi_Host *shost = class_to_shost(dev);
5141 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5142 	struct lpfc_hba *phba = vport->phba;
5143 	struct lpfc_queue *eq, *cq;
5144 	unsigned long val;
5145 	int i;
5146 
5147 	/* cq_max_proc_limit is only valid for SLI4 */
5148 	if (phba->sli_rev != LPFC_SLI_REV4)
5149 		return -EINVAL;
5150 
5151 	/* Sanity check on user data */
5152 	if (!isdigit(buf[0]))
5153 		return -EINVAL;
5154 	if (kstrtoul(buf, 0, &val))
5155 		return -EINVAL;
5156 
5157 	if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5158 		return -ERANGE;
5159 
5160 	phba->cfg_cq_max_proc_limit = (uint32_t)val;
5161 
5162 	/* set the values on the cq's */
5163 	for (i = 0; i < phba->cfg_irq_chann; i++) {
5164 		/* Get the EQ corresponding to the IRQ vector */
5165 		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5166 		if (!eq)
5167 			continue;
5168 
5169 		list_for_each_entry(cq, &eq->child_list, list)
5170 			cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5171 						 cq->entry_count);
5172 	}
5173 
5174 	return strlen(buf);
5175 }
5176 
5177 /*
5178  * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
5179  *   iteration of CQ processing.
5180  */
5181 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5182 module_param(lpfc_cq_max_proc_limit, int, 0644);
5183 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5184 	    "Set the maximum number of CQEs processed in an iteration of "
5185 	    "CQ processing");
5186 lpfc_param_show(cq_max_proc_limit)
5187 
5188 /*
5189  * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5190  *   single handler call which should request a polled completion rather
5191  *   than re-enabling interrupts.
5192  */
5193 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5194 	     LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5195 	     LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5196 	     "CQE Processing Threshold to enable Polling");
5197 
5198 /**
5199  * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5200  * @phba: lpfc_hba pointer.
5201  * @val: entry limit
5202  *
5203  * Description:
5204  * If val is in a valid range, then initialize the adapter's maximum
5205  * value.
5206  *
5207  * Returns:
5208  *  Always returns 0 for success, even if value not always set to
5209  *  requested value. If value out of range or not supported, will fall
5210  *  back to default.
5211  **/
5212 static int
5213 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5214 {
5215 	phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5216 
5217 	if (phba->sli_rev != LPFC_SLI_REV4)
5218 		return 0;
5219 
5220 	if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5221 		phba->cfg_cq_max_proc_limit = val;
5222 		return 0;
5223 	}
5224 
5225 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5226 			"0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
5227 			"%d out of range, using default\n",
5228 			val);
5229 
5230 	return 0;
5231 }
5232 
5233 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
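/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed,
 * with a value inside [LPFC_CQ_MIN_PROC_LIMIT, LPFC_CQ_MAX_PROC_LIMIT]):
 *
 *     echo 128 > /sys/class/scsi_host/hostN/lpfc_cq_max_proc_limit
 *
 * The store routine above clamps each child CQ to
 * min(cfg_cq_max_proc_limit, cq->entry_count), so a CQ never advertises a
 * processing limit larger than its own ring size.
 */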
5234 
5235 /**
5236  * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
5237  * @dev: class converted to a Scsi_host structure.
5238  * @attr: device attribute, not used.
5239  * @buf: on return contains text describing the state of the link.
5240  *
5241  * Returns: size of formatted string.
5242  **/
5243 static ssize_t
5244 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5245 		      char *buf)
5246 {
5247 	struct Scsi_Host  *shost = class_to_shost(dev);
5248 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5249 	struct lpfc_hba   *phba = vport->phba;
5250 	struct lpfc_vector_map_info *cpup;
5251 	int  len = 0;
5252 
5253 	if ((phba->sli_rev != LPFC_SLI_REV4) ||
5254 	    (phba->intr_type != MSIX))
5255 		return len;
5256 
5257 	switch (phba->cfg_fcp_cpu_map) {
5258 	case 0:
5259 		len += scnprintf(buf + len, PAGE_SIZE-len,
5260 				"fcp_cpu_map: No mapping (%d)\n",
5261 				phba->cfg_fcp_cpu_map);
5262 		return len;
5263 	case 1:
5264 		len += scnprintf(buf + len, PAGE_SIZE-len,
5265 				"fcp_cpu_map: HBA centric mapping (%d): "
5266 				"%d of %d CPUs online from %d possible CPUs\n",
5267 				phba->cfg_fcp_cpu_map, num_online_cpus(),
5268 				num_present_cpus(),
5269 				phba->sli4_hba.num_possible_cpu);
5270 		break;
5271 	}
5272 
5273 	while (phba->sli4_hba.curr_disp_cpu <
5274 	       phba->sli4_hba.num_possible_cpu) {
5275 		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5276 
5277 		if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5278 			len += scnprintf(buf + len, PAGE_SIZE - len,
5279 					"CPU %02d not present\n",
5280 					phba->sli4_hba.curr_disp_cpu);
5281 		else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5282 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5283 				len += scnprintf(
5284 					buf + len, PAGE_SIZE - len,
5285 					"CPU %02d hdwq None "
5286 					"physid %d coreid %d ht %d ua %d\n",
5287 					phba->sli4_hba.curr_disp_cpu,
5288 					cpup->phys_id, cpup->core_id,
5289 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5290 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5291 			else
5292 				len += scnprintf(
5293 					buf + len, PAGE_SIZE - len,
5294 					"CPU %02d EQ None hdwq %04d "
5295 					"physid %d coreid %d ht %d ua %d\n",
5296 					phba->sli4_hba.curr_disp_cpu,
5297 					cpup->hdwq, cpup->phys_id,
5298 					cpup->core_id,
5299 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5300 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5301 		} else {
5302 			if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5303 				len += scnprintf(
5304 					buf + len, PAGE_SIZE - len,
5305 					"CPU %02d hdwq None "
5306 					"physid %d coreid %d ht %d ua %d IRQ %d\n",
5307 					phba->sli4_hba.curr_disp_cpu,
5308 					cpup->phys_id,
5309 					cpup->core_id,
5310 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5311 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5312 					lpfc_get_irq(cpup->eq));
5313 			else
5314 				len += scnprintf(
5315 					buf + len, PAGE_SIZE - len,
5316 					"CPU %02d EQ %04d hdwq %04d "
5317 					"physid %d coreid %d ht %d ua %d IRQ %d\n",
5318 					phba->sli4_hba.curr_disp_cpu,
5319 					cpup->eq, cpup->hdwq, cpup->phys_id,
5320 					cpup->core_id,
5321 					(cpup->flag & LPFC_CPU_MAP_HYPER),
5322 					(cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5323 					lpfc_get_irq(cpup->eq));
5324 		}
5325 
5326 		phba->sli4_hba.curr_disp_cpu++;
5327 
5328 		/* display max number of CPUs keeping some margin */
5329 		if (phba->sli4_hba.curr_disp_cpu <
5330 				phba->sli4_hba.num_possible_cpu &&
5331 				(len >= (PAGE_SIZE - 64))) {
5332 			len += scnprintf(buf + len,
5333 					PAGE_SIZE - len, "more...\n");
5334 			break;
5335 		}
5336 	}
5337 
5338 	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5339 		phba->sli4_hba.curr_disp_cpu = 0;
5340 
5341 	return len;
5342 }
5343 
5344 /**
5345  * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5346  * @dev: class device that is converted into a Scsi_host.
5347  * @attr: device attribute, not used.
5348  * @buf: not used; the store operation is not implemented.
5349  * @count: not used.
5350  *
5351  * Returns:
5352  * -EINVAL  - Not implemented yet.
5353  **/
5354 static ssize_t
5355 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5356 		       const char *buf, size_t count)
5357 {
5358 	return -EINVAL;
5359 }
5360 
5361 /*
5362 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5363 # for the HBA.
5364 #
5365 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5366 #	0 - Do not affinitize IRQ vectors
5367 #	1 - Affinitize HBA vectors with respect to each HBA
5368 #	    (start with CPU0 for each HBA)
5369 # This also defines how Hardware Queues are mapped to specific CPUs.
5370 */
5371 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5372 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5373 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5374 		 "Defines how to map CPUs to IRQ vectors per HBA");
5375 
5376 /**
5377  * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
5378  * @phba: lpfc_hba pointer.
5379  * @val: CPU mapping mode value.
5380  *
5381  * Description:
5382  * If val is in the valid range [0,1], then set how the adapter's
5383  * MSI-X vectors are affinitized to CPUs.
5384  *
5385  * Returns:
5386  * zero if val saved.
5387  * -EINVAL val out of range
5388  **/
5389 static int
5390 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5391 {
5392 	if (phba->sli_rev != LPFC_SLI_REV4) {
5393 		phba->cfg_fcp_cpu_map = 0;
5394 		return 0;
5395 	}
5396 
5397 	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5398 		phba->cfg_fcp_cpu_map = val;
5399 		return 0;
5400 	}
5401 
5402 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5403 			"3326 lpfc_fcp_cpu_map: %d out of range, using "
5404 			"default\n", val);
5405 	phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5406 
5407 	return 0;
5408 }
5409 
5410 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
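/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 * the attribute is effectively read-only at runtime since the store routine
 * above returns -EINVAL; reading it dumps the per-CPU EQ/hdwq mapping, e.g.
 *
 *     cat /sys/class/scsi_host/hostN/lpfc_fcp_cpu_map
 */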
5411 
5412 /*
5413 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
5414 # Value range is [2,3]. Default value is 3.
5415 */
5416 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5417 		  "Select Fibre Channel class of service for FCP sequences");
5418 
5419 /*
5420 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5421 # is [0,1]. Default value is 0.
5422 */
5423 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5424 		   "Use ADISC on rediscovery to authenticate FCP devices");
5425 
5426 /*
5427 # lpfc_first_burst_size: First burst size to use on the NPorts
5428 # that support first burst.
5429 # Value range is [0,65536]. Default value is 0.
5430 */
5431 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5432 		   "First burst size for Targets that support first burst");
5433 
5434 /*
5435 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5436 * When the driver is configured as an NVME target, this value is
5437 * communicated to the NVME initiator in the PRLI response.  It is
5438 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5439 * parameters are set and the target is sending the PRLI RSP.
5440 * Parameter supported on physical port only - no NPIV support.
5441 * Value range is [0,65536]. Default value is 0.
5442 */
5443 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5444 	     "NVME Target mode first burst size in 512B increments.");
5445 
5446 /*
5447  * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5448  * For the Initiator (I), enabling this parameter means that an NVMET
5449  * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5450  * processed by the initiator for subsequent NVME FCP IO.
5451  * Currently, this feature is not supported on the NVME target
5452  * Value range is [0,1]. Default value is 0 (disabled).
5453  */
5454 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5455 	     "Enable First Burst feature for NVME Initiator.");
5456 
5457 /*
5458 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5459 # depth. Default value is 0. When the value of this parameter is zero the
5460 # SCSI command completion time is not used for controlling I/O queue depth. When
5461 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5462 # to limit the I/O completion time to the parameter value.
5463 # The value is set in milliseconds.
5464 */
5465 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5466 	"Use command completion time to control queue depth");
5467 
5468 lpfc_vport_param_show(max_scsicmpl_time);
5469 static int
5470 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5471 {
5472 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5473 	struct lpfc_nodelist *ndlp, *next_ndlp;
5474 
5475 	if (val == vport->cfg_max_scsicmpl_time)
5476 		return 0;
5477 	if ((val < 0) || (val > 60000))
5478 		return -EINVAL;
5479 	vport->cfg_max_scsicmpl_time = val;
5480 
5481 	spin_lock_irq(shost->host_lock);
5482 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5483 		if (!NLP_CHK_NODE_ACT(ndlp))
5484 			continue;
5485 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5486 			continue;
5487 		ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5488 	}
5489 	spin_unlock_irq(shost->host_lock);
5490 	return 0;
5491 }
5492 lpfc_vport_param_store(max_scsicmpl_time);
5493 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5494 
5495 /*
5496 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5497 # range is [0,1]. Default value is 0.
5498 */
5499 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5500 
5501 /*
5502 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5503 # range is [0,1]. Default value is 1.
5504 */
5505 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5506 
5507 /*
5508  * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
5509  * range is [0,1]. Default value is 0.
5510  * For [0], FCP commands are issued to Work Queues based on upper layer
5511  * hardware queue index.
5512  * For [1], FCP commands are issued to a Work Queue associated with the
5513  *          current CPU.
5514  *
5515  * LPFC_FCP_SCHED_BY_HDWQ == 0
5516  * LPFC_FCP_SCHED_BY_CPU == 1
5517  *
5518  * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5519  * affinity for FCP/NVME I/Os through Work Queues associated with the current
5520  * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5521  * through WQs will be used.
5522  */
5523 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5524 	     LPFC_FCP_SCHED_BY_HDWQ,
5525 	     LPFC_FCP_SCHED_BY_CPU,
5526 	     "Determine scheduling algorithm for "
5527 	     "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5528 
5529 /*
5530  * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5531  * range is [0,1]. Default value is 0.
5532  * For [0], GID_FT is used for NameServer queries after RSCN (default)
5533  * For [1], GID_PT is used for NameServer queries after RSCN
5534  *
5535  */
5536 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5537 	     LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5538 	     "Determine algorithm for NameServer queries after RSCN "
5539 	     "[0] - GID_FT, [1] - GID_PT");
5540 
5541 /*
5542 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5543 # range is [0,1]. Default value is 0.
5544 # For [0], bus reset issues target reset to ALL devices
5545 # For [1], bus reset issues target reset to non-FCP2 devices
5546 */
5547 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5548 	     "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5549 
5550 
5551 /*
5552 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5553 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5554 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5555 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5556 # cr_delay is set to 0.
5557 */
5558 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5559 		"interrupt response is generated");
5560 
5561 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5562 		"interrupt response is generated");
5563 
5564 /*
5565 # lpfc_multi_ring_support:  Determines how many rings to spread available
5566 # cmd/rsp IOCB entries across.
5567 # Value range is [1,2]. Default value is 1.
5568 */
5569 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5570 		"SLI rings to spread IOCB entries across");
5571 
5572 /*
5573 # lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
5574 # identifies what rctl value to configure the additional ring for.
5575 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5576 */
5577 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5578 	     255, "Identifies RCTL for additional ring configuration");
5579 
5580 /*
5581 # lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
5582 # identifies what type value to configure the additional ring for.
5583 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5584 */
5585 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5586 	     255, "Identifies TYPE for additional ring configuration");
5587 
5588 /*
5589 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5590 #       0  = SmartSAN functionality disabled (default)
5591 #       1  = SmartSAN functionality enabled
5592 # This parameter will override the value of lpfc_fdmi_on module parameter.
5593 # Value range is [0,1]. Default value is 0.
5594 */
5595 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5596 
5597 /*
5598 # lpfc_fdmi_on: Controls FDMI support.
5599 #       0       No FDMI support
5600 #       1       Traditional FDMI support (default)
5601 # Traditional FDMI support means the driver will assume FDMI-2 support;
5602 # however, if that fails, it will fall back to FDMI-1.
5603 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5604 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5605 # lpfc_fdmi_on.
5606 # Value range [0,1]. Default value is 1.
5607 */
5608 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5609 
5610 /*
5611 # Specifies the maximum number of ELS cmds we can have outstanding (for
5612 # discovery). Value range is [1,64]. Default value = 32.
5613 */
5614 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5615 		 "during discovery");
5616 
5617 /*
5618 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5619 #    will be scanned by the SCSI midlayer when sequential scanning is
5620 #    used; and is also the highest LUN ID allowed when the SCSI midlayer
5621 #    parses REPORT_LUN responses. The lpfc driver has no LUN count or
5622 #    LUN ID limit, but the SCSI midlayer requires this field for the uses
5623 #    above. The lpfc driver limits the default value to 255 for two reasons.
5624 #    As it bounds the sequential scan loop, scanning for thousands of luns
5625 #    on a target can take minutes of wall clock time.  Additionally,
5626 #    there are FC targets, such as JBODs, that only recognize 8-bits of
5627 #    LUN ID. When they receive a value greater than 8 bits, they chop off
5628 #    the high order bits. In other words, they see LUN IDs 0, 256, 512,
5629 #    and so on all as LUN ID 0. This causes the linux kernel, which sees
5630 #    valid responses at each of the LUN IDs, to believe there are multiple
5631 #    devices present, when in fact, there is only 1.
5632 #    A customer that is aware of their target behaviors, and the results as
5633 #    indicated above, is welcome to increase the lpfc_max_luns value.
5634 #    As mentioned, this value is not used by the lpfc driver, only the
5635 #    SCSI midlayer.
5636 # Value range is [0,65535]. Default value is 255.
5637 # NOTE: The SCSI layer might probe all allowed LUN on some old targets.
5638 */
5639 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
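/*
 * Worked example of the 8-bit LUN truncation described above (editor's
 * note): a target that honors only the low 8 bits of the LUN ID sees
 * LUN 256 as 256 & 0xff == 0, LUN 512 as 0, and so on, which is why the
 * default upper bound is kept at 255.
 */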
5640 
5641 /*
5642 # lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
5643 # Value range is [1,255], default value is 10.
5644 */
5645 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5646 	     "Milliseconds driver will wait between polling FCP ring");
5647 
5648 /*
5649 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5650 # to complete in seconds. Value range is [5,180], default value is 60.
5651 */
5652 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5653 	     "Maximum time to wait for task management commands to complete");
5654 /*
5655 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5656 #		support this feature
5657 #       0  = MSI disabled
5658 #       1  = MSI enabled
5659 #       2  = MSI-X enabled (default)
5660 # Value range is [0,2]. Default value is 2.
5661 */
5662 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5663 	    "MSI-X (2), if possible");
5664 
5665 /*
5666  * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5667  *
5668  *      0  = NVME OAS disabled
5669  *      1  = NVME OAS enabled
5670  *
5671  * Value range is [0,1]. Default value is 0.
5672  */
5673 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5674 	     "Use OAS bit on NVME IOs");
5675 
5676 /*
5677  * lpfc_nvme_embed_cmd: Embed NVME Command in WQE when sending NVME/NVMET IOs
5678  *
5679  *      0  = Put NVME Command in SGL
5680  *      1  = Embed NVME Command in WQE (unless G7)
5681  *      2  = Embed NVME Command in WQE (force)
5682  *
5683  * Value range is [0,2]. Default value is 1.
5684  */
5685 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5686 	     "Embed NVME Command in WQE");
5687 
5688 /*
5689  * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5690  * the driver will advertise it supports to the SCSI layer.
5691  *
5692  *      0    = Set nr_hw_queues by the number of CPUs or HW queues.
5693  *      1,256 = Manually specify the nr_hw_queues value to be advertised.
5694  *
5695  * Value range is [0,256]. Default value is 8.
5696  */
5697 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5698 	    LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5699 	    "Set the number of SCSI Queues advertised");
5700 
5701 /*
5702  * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5703  * will advertise it supports to the NVME and  SCSI layers. This also
5704  * will map to the number of CQ/WQ pairs the driver will create.
5705  *
5706  * The NVME Layer will try to create this many, plus 1 administrative
5707  * hardware queue. The administrative queue will always map to WQ 0
5708  * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5709  *
5710  *      0    = Configure the number of hdw queues to the number of active CPUs.
5711  *      1,256 = Manually specify how many hdw queues to use.
5712  *
5713  * Value range is [0,256]. Default value is 0.
5714  */
5715 LPFC_ATTR_R(hdw_queue,
5716 	    LPFC_HBA_HDWQ_DEF,
5717 	    LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5718 	    "Set the number of I/O Hardware Queues");
5719 
5720 #if IS_ENABLED(CONFIG_X86)
5721 /**
5722  * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5723  *				irq_chann_mode
5724  * @phba: Pointer to HBA context object.
5725  **/
5726 static void
5727 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5728 {
5729 	unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5730 	const struct cpumask *sibling_mask;
5731 	struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5732 
5733 	cpumask_clear(aff_mask);
5734 
5735 	if (phba->irq_chann_mode == NUMA_MODE) {
5736 		/* Check if we're a NUMA architecture */
5737 		numa_node = dev_to_node(&phba->pcidev->dev);
5738 		if (numa_node == NUMA_NO_NODE) {
5739 			phba->irq_chann_mode = NORMAL_MODE;
5740 			return;
5741 		}
5742 	}
5743 
5744 	for_each_possible_cpu(cpu) {
5745 		switch (phba->irq_chann_mode) {
5746 		case NUMA_MODE:
5747 			if (cpu_to_node(cpu) == numa_node)
5748 				cpumask_set_cpu(cpu, aff_mask);
5749 			break;
5750 		case NHT_MODE:
5751 			sibling_mask = topology_sibling_cpumask(cpu);
5752 			first_cpu = cpumask_first(sibling_mask);
5753 			if (first_cpu < nr_cpu_ids)
5754 				cpumask_set_cpu(first_cpu, aff_mask);
5755 			break;
5756 		default:
5757 			break;
5758 		}
5759 	}
5760 }
5761 #endif
5762 
5763 static void
5764 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5765 {
5766 #if IS_ENABLED(CONFIG_X86)
5767 	switch (boot_cpu_data.x86_vendor) {
5768 	case X86_VENDOR_AMD:
5769 		/* If AMD architecture, then default is NUMA_MODE */
5770 		phba->irq_chann_mode = NUMA_MODE;
5771 		break;
5772 	case X86_VENDOR_INTEL:
5773 		/* If Intel architecture, then default is no hyperthread mode */
5774 		phba->irq_chann_mode = NHT_MODE;
5775 		break;
5776 	default:
5777 		phba->irq_chann_mode = NORMAL_MODE;
5778 		break;
5779 	}
5780 	lpfc_cpumask_irq_mode_init(phba);
5781 #else
5782 	phba->irq_chann_mode = NORMAL_MODE;
5783 #endif
5784 }
5785 
5786 /*
5787  * lpfc_irq_chann: Set the number of IRQ vectors that are available
5788  * for Hardware Queues to utilize.  This also will map to the number
5789  * of EQ / MSI-X vectors the driver will create. This should never be
5790  * more than the number of Hardware Queues
5791  *
5792  *	0		= Configure number of IRQ Channels to:
5793  *			  if AMD architecture, number of CPUs on HBA's NUMA node
5794  *			  if Intel architecture, number of physical CPUs.
5795  *			  otherwise, number of active CPUs.
5796  *	[1,256]		= Manually specify how many IRQ Channels to use.
5797  *
5798  * Value range is [0,256]. Default value is [0].
5799  */
5800 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5801 module_param(lpfc_irq_chann, uint, 0444);
5802 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
5803 
5804 /* lpfc_irq_chann_init - Set the hba irq_chann initial value
5805  * @phba: lpfc_hba pointer.
5806  * @val: contains the initial value
5807  *
5808  * Description:
5809  * Validates the initial value is within range and assigns it to the
5810  * adapter. If not in range, an error message is posted and the
5811  * default value is assigned.
5812  *
5813  * Returns:
5814  * zero if value is in range and is set
5815  * -EINVAL if value was out of range
5816  **/
5817 static int
5818 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5819 {
5820 	const struct cpumask *aff_mask;
5821 
5822 	if (phba->cfg_use_msi != 2) {
5823 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5824 				"8532 use_msi = %u ignoring cfg_irq_numa\n",
5825 				phba->cfg_use_msi);
5826 		phba->irq_chann_mode = NORMAL_MODE;
5827 		phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5828 		return 0;
5829 	}
5830 
5831 	/* Check if default setting was passed */
5832 	if (val == LPFC_IRQ_CHANN_DEF)
5833 		lpfc_assign_default_irq_chann(phba);
5834 
5835 	if (phba->irq_chann_mode != NORMAL_MODE) {
5836 		aff_mask = &phba->sli4_hba.irq_aff_mask;
5837 
5838 		if (cpumask_empty(aff_mask)) {
5839 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5840 					"8533 Could not identify CPUS for "
5841 					"mode %d, ignoring\n",
5842 					phba->irq_chann_mode);
5843 			phba->irq_chann_mode = NORMAL_MODE;
5844 			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5845 		} else {
5846 			phba->cfg_irq_chann = cpumask_weight(aff_mask);
5847 
5848 			/* If no hyperthread mode, then set hdwq count to
5849 			 * aff_mask weight as well
5850 			 */
5851 			if (phba->irq_chann_mode == NHT_MODE)
5852 				phba->cfg_hdw_queue = phba->cfg_irq_chann;
5853 
5854 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5855 					"8543 lpfc_irq_chann set to %u "
5856 					"(mode: %d)\n", phba->cfg_irq_chann,
5857 					phba->irq_chann_mode);
5858 		}
5859 	} else {
5860 		if (val > LPFC_IRQ_CHANN_MAX) {
5861 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5862 					"8545 lpfc_irq_chann attribute cannot "
5863 					"be set to %u, allowed range is "
5864 					"[%u,%u]\n",
5865 					val,
5866 					LPFC_IRQ_CHANN_MIN,
5867 					LPFC_IRQ_CHANN_MAX);
5868 			phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5869 			return -EINVAL;
5870 		}
5871 		phba->cfg_irq_chann = val;
5872 	}
5873 
5874 	return 0;
5875 }
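/*
 * Worked example (editor's sketch, hypothetical topology): with
 * lpfc_irq_chann left at its default and use_msi == 2, an adapter on an
 * AMD NUMA node that has 16 CPUs gets cfg_irq_chann = 16 (NUMA_MODE),
 * while on an Intel system with 16 cores / 32 hyperthreads only the first
 * sibling of each core is counted, so cfg_irq_chann = 16 and, in NHT_MODE,
 * cfg_hdw_queue is set to the same value.
 */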
5876 
5877 /**
5878  * lpfc_irq_chann_show - Display value of irq_chann
5879  * @dev: class converted to a Scsi_host structure.
5880  * @attr: device attribute, not used.
5881  * @buf: on return contains the configured number of IRQ channels
5882  *
5883  * Returns: size of formatted string.
5884  **/
5885 static ssize_t
5886 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5887 		    char *buf)
5888 {
5889 	struct Scsi_Host *shost = class_to_shost(dev);
5890 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5891 	struct lpfc_hba *phba = vport->phba;
5892 
5893 	return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
5894 }
5895 
5896 static DEVICE_ATTR_RO(lpfc_irq_chann);
5897 
5898 /*
5899 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5900 #       0  = HBA resets disabled
5901 #       1  = HBA resets enabled (default)
5902 #       2  = HBA reset via PCI bus reset enabled
5903 # Value range is [0,2]. Default value is 1.
5904 */
5905 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5906 
5907 /*
5908 # lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
5909 #       0  = HBA Heartbeat disabled (default)
5910 #       1  = HBA Heartbeat enabled
5911 # Value range is [0,1]. Default value is 0.
5912 */
5913 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5914 
5915 /*
5916 # lpfc_EnableXLane: Enable Express Lane Feature
5917 #      0x0   Express Lane Feature disabled
5918 #      0x1   Express Lane Feature enabled
5919 # Value range is [0,1]. Default value is 0.
5920 */
5921 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5922 
5923 /*
5924 # lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature
5925 #       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
5926 # Value range is [0x0,0x7f]. Default value is 0
5927 */
5928 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5929 
5930 /*
5931 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
5932 #       0  = BlockGuard disabled (default)
5933 #       1  = BlockGuard enabled
5934 # Value range is [0,1]. Default value is 0.
5935 */
5936 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5937 
5938 /*
5939 # lpfc_prot_mask:
5940 #	- Bit mask of host protection capabilities used to register with the
5941 #	  SCSI mid-layer
5942 # 	- Only meaningful if BG is turned on (lpfc_enable_bg=1).
5943 #	- Allows you to ultimately specify which profiles to use
5944 #	- Default will result in registering capabilities for all profiles.
5945 #	- SHOST_DIF_TYPE1_PROTECTION	1
5946 #		HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
5947 #	- SHOST_DIX_TYPE0_PROTECTION	8
5948 #		HBA supports DIX Type 0: Host to HBA protection only
5949 #	- SHOST_DIX_TYPE1_PROTECTION	16
5950 #		HBA supports DIX Type 1: Host to HBA  Type 1 protection
5951 #
5952 */
5953 LPFC_ATTR(prot_mask,
5954 	(SHOST_DIF_TYPE1_PROTECTION |
5955 	SHOST_DIX_TYPE0_PROTECTION |
5956 	SHOST_DIX_TYPE1_PROTECTION),
5957 	0,
5958 	(SHOST_DIF_TYPE1_PROTECTION |
5959 	SHOST_DIX_TYPE0_PROTECTION |
5960 	SHOST_DIX_TYPE1_PROTECTION),
5961 	"T10-DIF host protection capabilities mask");
5962 
5963 /*
5964 # lpfc_prot_guard:
5965 #	- Bit mask of protection guard types to register with the SCSI mid-layer
5966 #	- Guard types are currently either 1) T10-DIF CRC 2) IP checksum
5967 #	- Allows you to ultimately specify which profiles to use
5968 #	- Default will result in registering capabilities for all guard types
5969 #
5970 */
5971 LPFC_ATTR(prot_guard,
5972 	SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5973 	"T10-DIF host protection guard type");
5974 
5975 /*
5976  * Delay initial NPort discovery when Clean Address bit is cleared in
5977  * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
5978  * This parameter can have value 0 or 1.
5979  * When this parameter is set to 0, no delay is added to the initial
5980  * discovery.
5981  * When this parameter is set to a non-zero value, initial Nport discovery is
5982  * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
5983  * accept and FCID/Fabric name/Fabric portname is changed.
5984  * The driver always delays Nport discovery for subsequent FLOGI/FDISC completion
5985  * when Clean Address bit is cleared in FLOGI/FDISC
5986  * accept and FCID/Fabric name/Fabric portname is changed.
5987  * Default value is 0.
5988  */
5989 LPFC_ATTR(delay_discovery, 0, 0, 1,
5990 	"Delay NPort discovery when Clean Address bit is cleared.");
5991 
5992 /*
5993  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
5994  * This value can be set to values between 64 and 4096. The default value
5995  * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
5996  * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
5997  * Because of the additional overhead involved in setting up T10-DIF,
5998  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
5999  * and will be limited to 512 if BlockGuard is enabled under SLI3.
6000  */
6001 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6002 module_param(lpfc_sg_seg_cnt, uint, 0444);
6003 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
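/*
 * Worked example (editor's sketch, assuming a 4 KB segment size): with the
 * default lpfc_sg_seg_cnt of 64, the upper layers can build I/Os of up to
 *
 *     64 segments * 4 KB = 256 KB
 *
 * per the (MAX_SEG_COUNT * SEG_SIZE) relationship noted in the comment above.
 */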
6004 
6005 /**
6006  * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
6007  *    configured for the adapter
6008  * @dev: class converted to a Scsi_host structure.
6009  * @attr: device attribute, not used.
6010  * @buf: on return contains a string with the list sizes
6011  *
6012  * Returns: size of formatted string.
6013  **/
6014 static ssize_t
6015 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6016 		     char *buf)
6017 {
6018 	struct Scsi_Host  *shost = class_to_shost(dev);
6019 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6020 	struct lpfc_hba   *phba = vport->phba;
6021 	int len;
6022 
6023 	len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
6024 		       phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6025 
6026 	len += scnprintf(buf + len, PAGE_SIZE - len,
6027 			"Cfg: %d  SCSI: %d  NVME: %d\n",
6028 			phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6029 			phba->cfg_nvme_seg_cnt);
6030 	return len;
6031 }
6032 
6033 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
6034 
6035 /**
6036  * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6037  * @phba: lpfc_hba pointer.
6038  * @val: contains the initial value
6039  *
6040  * Description:
6041  * Validates the initial value is within range and assigns it to the
6042  * adapter. If not in range, an error message is posted and the
6043  * default value is assigned.
6044  *
6045  * Returns:
6046  * zero if value is in range and is set
6047  * -EINVAL if value was out of range
6048  **/
6049 static int
6050 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6051 {
6052 	if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6053 		phba->cfg_sg_seg_cnt = val;
6054 		return 0;
6055 	}
6056 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6057 			"0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
6058 			"be set to %d, allowed range is [%d, %d]\n",
6059 			val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6060 	phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6061 	return -EINVAL;
6062 }
6063 
6064 /*
6065  * lpfc_enable_mds_diags: Enable MDS Diagnostics
6066  *       0  = MDS Diagnostics disabled (default)
6067  *       1  = MDS Diagnostics enabled
6068  * Value range is [0,1]. Default value is 0.
6069  */
6070 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6071 
6072 /*
6073  * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6074  *	0 = Disable firmware logging (default)
6075  *	[1-4] = Multiple of 1/4th Mb of host memory for FW logging
6076  * Value range [0..4]. Default value is 0
6077  */
6078 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6079 lpfc_param_show(ras_fwlog_buffsize);
6080 
6081 static ssize_t
6082 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba  *phba, uint val)
6083 {
6084 	int ret = 0;
6085 	enum ras_state state;
6086 
6087 	if (!lpfc_rangecheck(val, 0, 4))
6088 		return -EINVAL;
6089 
6090 	if (phba->cfg_ras_fwlog_buffsize == val)
6091 		return 0;
6092 
6093 	if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6094 		return -EINVAL;
6095 
6096 	spin_lock_irq(&phba->hbalock);
6097 	state = phba->ras_fwlog.state;
6098 	spin_unlock_irq(&phba->hbalock);
6099 
6100 	if (state == REG_INPROGRESS) {
6101 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6102 				"registration is in progress\n");
6103 		return -EBUSY;
6104 	}
6105 
6106 	/* For disable logging: stop the logs and free the DMA.
6107 	 * For ras_fwlog_buffsize size change we still need to free and
6108 	 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6109 	 */
6110 	phba->cfg_ras_fwlog_buffsize = val;
6111 	if (state == ACTIVE) {
6112 		lpfc_ras_stop_fwlog(phba);
6113 		lpfc_sli4_ras_dma_free(phba);
6114 	}
6115 
6116 	lpfc_sli4_ras_init(phba);
6117 	if (phba->ras_fwlog.ras_enabled)
6118 		ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6119 					       LPFC_RAS_ENABLE_LOGGING);
6120 	return ret;
6121 }
6122 
6123 lpfc_param_store(ras_fwlog_buffsize);
6124 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
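/*
 * Illustrative sysfs usage (editor's sketch, host instance "hostN" assumed):
 * each unit is 1/4 MB of host memory per the parameter description above, so
 *
 *     echo 2 > /sys/class/scsi_host/hostN/lpfc_ras_fwlog_buffsize
 *
 * requests a 512 KB firmware log buffer, and writing 0 disables FW logging.
 * Changing the size while logging is ACTIVE stops the log and frees the old
 * DMA buffer before lpfc_sli4_ras_fwlog_init() reallocates it.
 */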
6125 
6126 /*
6127  * lpfc_ras_fwlog_level: Firmware logging verbosity level
6128  * Valid only if firmware logging is enabled
6129  * 0 (least verbosity) to 4 (most verbosity)
6130  * Value range is [0..4]. Default value is 0
6131  */
6132 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6133 
6134 /*
6135  * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6136  * Default function which has RAS support : 0
6137  * Value Range is [0..7].
6138  * FW logging is a global action and enablement is via a specific
6139  * port.
6140  */
6141 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6142 
6143 /*
6144  * lpfc_enable_bbcr: Enable BB Credit Recovery
6145  *       0  = BB Credit Recovery disabled
6146  *       1  = BB Credit Recovery enabled (default)
6147  * Value range is [0,1]. Default value is 1.
6148  */
6149 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6150 
6151 /*
6152  * lpfc_enable_dpp: Enable DPP on G7
6153  *       0  = DPP on G7 disabled
6154  *       1  = DPP on G7 enabled (default)
6155  * Value range is [0,1]. Default value is 1.
6156  */
6157 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6158 
6159 struct device_attribute *lpfc_hba_attrs[] = {
6160 	&dev_attr_nvme_info,
6161 	&dev_attr_scsi_stat,
6162 	&dev_attr_bg_info,
6163 	&dev_attr_bg_guard_err,
6164 	&dev_attr_bg_apptag_err,
6165 	&dev_attr_bg_reftag_err,
6166 	&dev_attr_info,
6167 	&dev_attr_serialnum,
6168 	&dev_attr_modeldesc,
6169 	&dev_attr_modelname,
6170 	&dev_attr_programtype,
6171 	&dev_attr_portnum,
6172 	&dev_attr_fwrev,
6173 	&dev_attr_hdw,
6174 	&dev_attr_option_rom_version,
6175 	&dev_attr_link_state,
6176 	&dev_attr_num_discovered_ports,
6177 	&dev_attr_menlo_mgmt_mode,
6178 	&dev_attr_lpfc_drvr_version,
6179 	&dev_attr_lpfc_enable_fip,
6180 	&dev_attr_lpfc_temp_sensor,
6181 	&dev_attr_lpfc_log_verbose,
6182 	&dev_attr_lpfc_lun_queue_depth,
6183 	&dev_attr_lpfc_tgt_queue_depth,
6184 	&dev_attr_lpfc_hba_queue_depth,
6185 	&dev_attr_lpfc_peer_port_login,
6186 	&dev_attr_lpfc_nodev_tmo,
6187 	&dev_attr_lpfc_devloss_tmo,
6188 	&dev_attr_lpfc_enable_fc4_type,
6189 	&dev_attr_lpfc_fcp_class,
6190 	&dev_attr_lpfc_use_adisc,
6191 	&dev_attr_lpfc_first_burst_size,
6192 	&dev_attr_lpfc_ack0,
6193 	&dev_attr_lpfc_xri_rebalancing,
6194 	&dev_attr_lpfc_topology,
6195 	&dev_attr_lpfc_scan_down,
6196 	&dev_attr_lpfc_link_speed,
6197 	&dev_attr_lpfc_fcp_io_sched,
6198 	&dev_attr_lpfc_ns_query,
6199 	&dev_attr_lpfc_fcp2_no_tgt_reset,
6200 	&dev_attr_lpfc_cr_delay,
6201 	&dev_attr_lpfc_cr_count,
6202 	&dev_attr_lpfc_multi_ring_support,
6203 	&dev_attr_lpfc_multi_ring_rctl,
6204 	&dev_attr_lpfc_multi_ring_type,
6205 	&dev_attr_lpfc_fdmi_on,
6206 	&dev_attr_lpfc_enable_SmartSAN,
6207 	&dev_attr_lpfc_max_luns,
6208 	&dev_attr_lpfc_enable_npiv,
6209 	&dev_attr_lpfc_fcf_failover_policy,
6210 	&dev_attr_lpfc_enable_rrq,
6211 	&dev_attr_nport_evt_cnt,
6212 	&dev_attr_board_mode,
6213 	&dev_attr_max_vpi,
6214 	&dev_attr_used_vpi,
6215 	&dev_attr_max_rpi,
6216 	&dev_attr_used_rpi,
6217 	&dev_attr_max_xri,
6218 	&dev_attr_used_xri,
6219 	&dev_attr_npiv_info,
6220 	&dev_attr_issue_reset,
6221 	&dev_attr_lpfc_poll,
6222 	&dev_attr_lpfc_poll_tmo,
6223 	&dev_attr_lpfc_task_mgmt_tmo,
6224 	&dev_attr_lpfc_use_msi,
6225 	&dev_attr_lpfc_nvme_oas,
6226 	&dev_attr_lpfc_nvme_embed_cmd,
6227 	&dev_attr_lpfc_fcp_imax,
6228 	&dev_attr_lpfc_force_rscn,
6229 	&dev_attr_lpfc_cq_poll_threshold,
6230 	&dev_attr_lpfc_cq_max_proc_limit,
6231 	&dev_attr_lpfc_fcp_cpu_map,
6232 	&dev_attr_lpfc_fcp_mq_threshold,
6233 	&dev_attr_lpfc_hdw_queue,
6234 	&dev_attr_lpfc_irq_chann,
6235 	&dev_attr_lpfc_suppress_rsp,
6236 	&dev_attr_lpfc_nvmet_mrq,
6237 	&dev_attr_lpfc_nvmet_mrq_post,
6238 	&dev_attr_lpfc_nvme_enable_fb,
6239 	&dev_attr_lpfc_nvmet_fb_size,
6240 	&dev_attr_lpfc_enable_bg,
6241 	&dev_attr_lpfc_soft_wwnn,
6242 	&dev_attr_lpfc_soft_wwpn,
6243 	&dev_attr_lpfc_soft_wwn_enable,
6244 	&dev_attr_lpfc_enable_hba_reset,
6245 	&dev_attr_lpfc_enable_hba_heartbeat,
6246 	&dev_attr_lpfc_EnableXLane,
6247 	&dev_attr_lpfc_XLanePriority,
6248 	&dev_attr_lpfc_xlane_lun,
6249 	&dev_attr_lpfc_xlane_tgt,
6250 	&dev_attr_lpfc_xlane_vpt,
6251 	&dev_attr_lpfc_xlane_lun_state,
6252 	&dev_attr_lpfc_xlane_lun_status,
6253 	&dev_attr_lpfc_xlane_priority,
6254 	&dev_attr_lpfc_sg_seg_cnt,
6255 	&dev_attr_lpfc_max_scsicmpl_time,
6256 	&dev_attr_lpfc_stat_data_ctrl,
6257 	&dev_attr_lpfc_aer_support,
6258 	&dev_attr_lpfc_aer_state_cleanup,
6259 	&dev_attr_lpfc_sriov_nr_virtfn,
6260 	&dev_attr_lpfc_req_fw_upgrade,
6261 	&dev_attr_lpfc_suppress_link_up,
6262 	&dev_attr_iocb_hw,
6263 	&dev_attr_pls,
6264 	&dev_attr_pt,
6265 	&dev_attr_txq_hw,
6266 	&dev_attr_txcmplq_hw,
6267 	&dev_attr_lpfc_sriov_hw_max_virtfn,
6268 	&dev_attr_protocol,
6269 	&dev_attr_lpfc_xlane_supported,
6270 	&dev_attr_lpfc_enable_mds_diags,
6271 	&dev_attr_lpfc_ras_fwlog_buffsize,
6272 	&dev_attr_lpfc_ras_fwlog_level,
6273 	&dev_attr_lpfc_ras_fwlog_func,
6274 	&dev_attr_lpfc_enable_bbcr,
6275 	&dev_attr_lpfc_enable_dpp,
6276 	NULL,
6277 };
6278 
6279 struct device_attribute *lpfc_vport_attrs[] = {
6280 	&dev_attr_info,
6281 	&dev_attr_link_state,
6282 	&dev_attr_num_discovered_ports,
6283 	&dev_attr_lpfc_drvr_version,
6284 	&dev_attr_lpfc_log_verbose,
6285 	&dev_attr_lpfc_lun_queue_depth,
6286 	&dev_attr_lpfc_tgt_queue_depth,
6287 	&dev_attr_lpfc_nodev_tmo,
6288 	&dev_attr_lpfc_devloss_tmo,
6289 	&dev_attr_lpfc_hba_queue_depth,
6290 	&dev_attr_lpfc_peer_port_login,
6291 	&dev_attr_lpfc_restrict_login,
6292 	&dev_attr_lpfc_fcp_class,
6293 	&dev_attr_lpfc_use_adisc,
6294 	&dev_attr_lpfc_first_burst_size,
6295 	&dev_attr_lpfc_max_luns,
6296 	&dev_attr_nport_evt_cnt,
6297 	&dev_attr_npiv_info,
6298 	&dev_attr_lpfc_enable_da_id,
6299 	&dev_attr_lpfc_max_scsicmpl_time,
6300 	&dev_attr_lpfc_stat_data_ctrl,
6301 	&dev_attr_lpfc_static_vport,
6302 	NULL,
6303 };
6304 
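/*
 * A minimal sketch of how the two attribute tables above are consumed.  The
 * template wiring shown here is an assumption (it lives elsewhere in the
 * driver and differs across kernel versions); only the table contents above
 * are authoritative.  Once hooked into the SCSI host template, every entry
 * is published under /sys/class/scsi_host/hostN/:
 *
 *	static struct scsi_host_template lpfc_template = {
 *		...
 *		.shost_attrs	= lpfc_hba_attrs,
 *	};
 *
 * with lpfc_vport_attrs plugged into the NPIV vport template the same way.
 */
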
6305 /**
6306  * sysfs_ctlreg_write - Write method for writing to ctlreg
6307  * @filp: open sysfs file
6308  * @kobj: kernel kobject that contains the kernel class device.
6309  * @bin_attr: kernel attributes passed to us.
6310  * @buf: contains the data to be written to the adapter IOREG space.
6311  * @off: offset into buffer to beginning of data.
6312  * @count: bytes to transfer.
6313  *
6314  * Description:
6315  * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6316  * Uses the adapter io control registers to send buf contents to the adapter.
6317  *
6318  * Returns:
6319  * -ERANGE off and count combo out of range
6320  * -EINVAL off, count or buff address invalid
6321  * -EPERM SLI-4 adapter, or adapter is not offline
6322  * value of count, buf contents written
6323  **/
6324 static ssize_t
6325 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6326 		   struct bin_attribute *bin_attr,
6327 		   char *buf, loff_t off, size_t count)
6328 {
6329 	size_t buf_off;
6330 	struct device *dev = container_of(kobj, struct device, kobj);
6331 	struct Scsi_Host  *shost = class_to_shost(dev);
6332 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6333 	struct lpfc_hba   *phba = vport->phba;
6334 
6335 	if (phba->sli_rev >= LPFC_SLI_REV4)
6336 		return -EPERM;
6337 
6338 	if ((off + count) > FF_REG_AREA_SIZE)
6339 		return -ERANGE;
6340 
6341 	if (count <= LPFC_REG_WRITE_KEY_SIZE)
6342 		return 0;
6343 
6344 	if (off % 4 || count % 4 || (unsigned long)buf % 4)
6345 		return -EINVAL;
6346 
6347 	/* This is to protect HBA registers from accidental writes. */
6348 	if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6349 		return -EINVAL;
6350 
6351 	if (!(vport->fc_flag & FC_OFFLINE_MODE))
6352 		return -EPERM;
6353 
6354 	spin_lock_irq(&phba->hbalock);
6355 	for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6356 			buf_off += sizeof(uint32_t))
6357 		writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6358 		       phba->ctrl_regs_memmap_p + off + buf_off);
6359 
6360 	spin_unlock_irq(&phba->hbalock);
6361 
6362 	return count;
6363 }
6364 
6365 /**
6366  * sysfs_ctlreg_read - Read method for reading from ctlreg
6367  * @filp: open sysfs file
6368  * @kobj: kernel kobject that contains the kernel class device.
6369  * @bin_attr: kernel attributes passed to us.
6370  * @buf: if successful contains the data from the adapter IOREG space.
6371  * @off: offset into buffer to beginning of data.
6372  * @count: bytes to transfer.
6373  *
6374  * Description:
6375  * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6376  * Uses the adapter io control registers to read data into buf.
6377  *
6378  * Returns:
6379  * -ERANGE off and count combo out of range
6380  * -EINVAL off, count or buff address invalid
6381  * value of count, buf contents read
6382  **/
6383 static ssize_t
6384 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6385 		  struct bin_attribute *bin_attr,
6386 		  char *buf, loff_t off, size_t count)
6387 {
6388 	size_t buf_off;
6389 	uint32_t *tmp_ptr;
6390 	struct device *dev = container_of(kobj, struct device, kobj);
6391 	struct Scsi_Host  *shost = class_to_shost(dev);
6392 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6393 	struct lpfc_hba   *phba = vport->phba;
6394 
6395 	if (phba->sli_rev >= LPFC_SLI_REV4)
6396 		return -EPERM;
6397 
6398 	if (off > FF_REG_AREA_SIZE)
6399 		return -ERANGE;
6400 
6401 	if ((off + count) > FF_REG_AREA_SIZE)
6402 		count = FF_REG_AREA_SIZE - off;
6403 
6404 	if (count == 0) return 0;
6405 
6406 	if (off % 4 || count % 4 || (unsigned long)buf % 4)
6407 		return -EINVAL;
6408 
6409 	spin_lock_irq(&phba->hbalock);
6410 
6411 	for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6412 		tmp_ptr = (uint32_t *)(buf + buf_off);
6413 		*tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6414 	}
6415 
6416 	spin_unlock_irq(&phba->hbalock);
6417 
6418 	return count;
6419 }
6420 
6421 static struct bin_attribute sysfs_ctlreg_attr = {
6422 	.attr = {
6423 		.name = "ctlreg",
6424 		.mode = S_IRUSR | S_IWUSR,
6425 	},
6426 	.size = 256,
6427 	.read = sysfs_ctlreg_read,
6428 	.write = sysfs_ctlreg_write,
6429 };
6430 
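/*
 * A minimal userspace sketch of driving the "ctlreg" attribute defined
 * above (illustrative only: the sysfs path, helper name and error handling
 * are assumptions, and <fcntl.h>, <stdint.h>, <string.h> and <unistd.h> are
 * needed).  Writes must use 32-bit aligned offsets and lengths within the
 * 256-byte window exported above, carry the "EMLX" key in the first four
 * bytes, and are honoured only while an SLI-3 (or earlier) adapter is
 * offline:
 *
 *	// e.g. path = "/sys/class/scsi_host/host0/ctlreg" (host number assumed)
 *	int ctlreg_write32(const char *path, off_t reg_off, uint32_t val)
 *	{
 *		uint32_t buf[2];
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		memcpy(buf, "EMLX", 4);		// mandatory write key
 *		buf[1] = val;			// one register word
 *		if (pwrite(fd, buf, sizeof(buf), reg_off) != sizeof(buf)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 * A matching read is simply a pread() of a 4-byte multiple at a 4-byte
 * aligned offset; it returns the raw register contents as 32-bit words.
 */
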
6431 /**
6432  * sysfs_mbox_write - Write method for writing information via mbox
6433  * @filp: open sysfs file
6434  * @kobj: kernel kobject that contains the kernel class device.
6435  * @bin_attr: kernel attributes passed to us.
6436  * @buf: contains the data to be written to sysfs mbox.
6437  * @off: offset into buffer to beginning of data.
6438  * @count: bytes to transfer.
6439  *
6440  * Description:
6441  * Deprecated function. All mailbox access from user space is performed via the
6442  * bsg interface.
6443  *
6444  * Returns:
6445  * -EPERM operation not permitted
6446  **/
6447 static ssize_t
6448 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6449 		 struct bin_attribute *bin_attr,
6450 		 char *buf, loff_t off, size_t count)
6451 {
6452 	return -EPERM;
6453 }
6454 
6455 /**
6456  * sysfs_mbox_read - Read method for reading information via mbox
6457  * @filp: open sysfs file
6458  * @kobj: kernel kobject that contains the kernel class device.
6459  * @bin_attr: kernel attributes passed to us.
6460  * @buf: contains the data to be read from sysfs mbox.
6461  * @off: offset into buffer to beginning of data.
6462  * @count: bytes to transfer.
6463  *
6464  * Description:
6465  * Deprecated function. All mailbox access from user space is performed via the
6466  * bsg interface.
6467  *
6468  * Returns:
6469  * -EPERM operation not permitted
6470  **/
6471 static ssize_t
6472 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6473 		struct bin_attribute *bin_attr,
6474 		char *buf, loff_t off, size_t count)
6475 {
6476 	return -EPERM;
6477 }
6478 
6479 static struct bin_attribute sysfs_mbox_attr = {
6480 	.attr = {
6481 		.name = "mbox",
6482 		.mode = S_IRUSR | S_IWUSR,
6483 	},
6484 	.size = MAILBOX_SYSFS_MAX,
6485 	.read = sysfs_mbox_read,
6486 	.write = sysfs_mbox_write,
6487 };
6488 
6489 /**
6490  * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6491  * @vport: address of lpfc vport structure.
6492  *
6493  * Return codes:
6494  * zero on success
6495  * error return code from sysfs_create_bin_file()
6496  **/
6497 int
6498 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6499 {
6500 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6501 	int error;
6502 
6503 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6504 				      &sysfs_drvr_stat_data_attr);
6505 
6506 	/* Virtual ports do not need ctrl_reg and mbox */
6507 	if (error || vport->port_type == LPFC_NPIV_PORT)
6508 		goto out;
6509 
6510 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6511 				      &sysfs_ctlreg_attr);
6512 	if (error)
6513 		goto out_remove_stat_attr;
6514 
6515 	error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6516 				      &sysfs_mbox_attr);
6517 	if (error)
6518 		goto out_remove_ctlreg_attr;
6519 
6520 	return 0;
6521 out_remove_ctlreg_attr:
6522 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6523 out_remove_stat_attr:
6524 	sysfs_remove_bin_file(&shost->shost_dev.kobj,
6525 			&sysfs_drvr_stat_data_attr);
6526 out:
6527 	return error;
6528 }
6529 
6530 /**
6531  * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6532  * @vport: address of lpfc vport structure.
6533  **/
6534 void
6535 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6536 {
6537 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6538 	sysfs_remove_bin_file(&shost->shost_dev.kobj,
6539 		&sysfs_drvr_stat_data_attr);
6540 	/* Virtual ports do not need ctrl_reg and mbox */
6541 	if (vport->port_type == LPFC_NPIV_PORT)
6542 		return;
6543 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6544 	sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6545 }
6546 
6547 /*
6548  * Dynamic FC Host Attributes Support
6549  */
6550 
6551 /**
6552  * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6553  * @shost: kernel scsi host pointer.
6554  **/
6555 static void
6556 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6557 {
6558 	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6559 
6560 	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6561 				      sizeof(fc_host_symbolic_name(shost)));
6562 }
6563 
6564 /**
6565  * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6566  * @shost: kernel scsi host pointer.
6567  **/
6568 static void
6569 lpfc_get_host_port_id(struct Scsi_Host *shost)
6570 {
6571 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6572 
6573 	/* note: fc_myDID already in cpu endianness */
6574 	fc_host_port_id(shost) = vport->fc_myDID;
6575 }
6576 
6577 /**
6578  * lpfc_get_host_port_type - Set the value of the scsi host port type
6579  * @shost: kernel scsi host pointer.
6580  **/
6581 static void
6582 lpfc_get_host_port_type(struct Scsi_Host *shost)
6583 {
6584 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6585 	struct lpfc_hba   *phba = vport->phba;
6586 
6587 	spin_lock_irq(shost->host_lock);
6588 
6589 	if (vport->port_type == LPFC_NPIV_PORT) {
6590 		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6591 	} else if (lpfc_is_link_up(phba)) {
6592 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6593 			if (vport->fc_flag & FC_PUBLIC_LOOP)
6594 				fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6595 			else
6596 				fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6597 		} else {
6598 			if (vport->fc_flag & FC_FABRIC)
6599 				fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6600 			else
6601 				fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6602 		}
6603 	} else
6604 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6605 
6606 	spin_unlock_irq(shost->host_lock);
6607 }
6608 
6609 /**
6610  * lpfc_get_host_port_state - Set the value of the scsi host port state
6611  * @shost: kernel scsi host pointer.
6612  **/
6613 static void
6614 lpfc_get_host_port_state(struct Scsi_Host *shost)
6615 {
6616 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6617 	struct lpfc_hba   *phba = vport->phba;
6618 
6619 	spin_lock_irq(shost->host_lock);
6620 
6621 	if (vport->fc_flag & FC_OFFLINE_MODE)
6622 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6623 	else {
6624 		switch (phba->link_state) {
6625 		case LPFC_LINK_UNKNOWN:
6626 		case LPFC_LINK_DOWN:
6627 			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6628 			break;
6629 		case LPFC_LINK_UP:
6630 		case LPFC_CLEAR_LA:
6631 		case LPFC_HBA_READY:
6632 			/* Link is up, report port state accordingly */
6633 			if (vport->port_state < LPFC_VPORT_READY)
6634 				fc_host_port_state(shost) =
6635 							FC_PORTSTATE_BYPASSED;
6636 			else
6637 				fc_host_port_state(shost) =
6638 							FC_PORTSTATE_ONLINE;
6639 			break;
6640 		case LPFC_HBA_ERROR:
6641 			fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6642 			break;
6643 		default:
6644 			fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6645 			break;
6646 		}
6647 	}
6648 
6649 	spin_unlock_irq(shost->host_lock);
6650 }
6651 
6652 /**
6653  * lpfc_get_host_speed - Set the value of the scsi host speed
6654  * @shost: kernel scsi host pointer.
6655  **/
6656 static void
6657 lpfc_get_host_speed(struct Scsi_Host *shost)
6658 {
6659 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6660 	struct lpfc_hba   *phba = vport->phba;
6661 
6662 	spin_lock_irq(shost->host_lock);
6663 
6664 	if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6665 		switch (phba->fc_linkspeed) {
6666 		case LPFC_LINK_SPEED_1GHZ:
6667 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6668 			break;
6669 		case LPFC_LINK_SPEED_2GHZ:
6670 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6671 			break;
6672 		case LPFC_LINK_SPEED_4GHZ:
6673 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6674 			break;
6675 		case LPFC_LINK_SPEED_8GHZ:
6676 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6677 			break;
6678 		case LPFC_LINK_SPEED_10GHZ:
6679 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6680 			break;
6681 		case LPFC_LINK_SPEED_16GHZ:
6682 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6683 			break;
6684 		case LPFC_LINK_SPEED_32GHZ:
6685 			fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6686 			break;
6687 		case LPFC_LINK_SPEED_64GHZ:
6688 			fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6689 			break;
6690 		case LPFC_LINK_SPEED_128GHZ:
6691 			fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6692 			break;
6693 		default:
6694 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6695 			break;
6696 		}
6697 	} else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6698 		switch (phba->fc_linkspeed) {
6699 		case LPFC_ASYNC_LINK_SPEED_1GBPS:
6700 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6701 			break;
6702 		case LPFC_ASYNC_LINK_SPEED_10GBPS:
6703 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6704 			break;
6705 		case LPFC_ASYNC_LINK_SPEED_20GBPS:
6706 			fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6707 			break;
6708 		case LPFC_ASYNC_LINK_SPEED_25GBPS:
6709 			fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6710 			break;
6711 		case LPFC_ASYNC_LINK_SPEED_40GBPS:
6712 			fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6713 			break;
6714 		case LPFC_ASYNC_LINK_SPEED_100GBPS:
6715 			fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6716 			break;
6717 		default:
6718 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6719 			break;
6720 		}
6721 	} else
6722 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6723 
6724 	spin_unlock_irq(shost->host_lock);
6725 }
6726 
6727 /**
6728  * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6729  * @shost: kernel scsi host pointer.
6730  **/
6731 static void
6732 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6733 {
6734 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6735 	struct lpfc_hba   *phba = vport->phba;
6736 	u64 node_name;
6737 
6738 	spin_lock_irq(shost->host_lock);
6739 
6740 	if ((vport->port_state > LPFC_FLOGI) &&
6741 	    ((vport->fc_flag & FC_FABRIC) ||
6742 	     ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6743 	      (vport->fc_flag & FC_PUBLIC_LOOP))))
6744 		node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6745 	else
6746 		/* fabric is local port if there is no F/FL_Port */
6747 		node_name = 0;
6748 
6749 	spin_unlock_irq(shost->host_lock);
6750 
6751 	fc_host_fabric_name(shost) = node_name;
6752 }
6753 
6754 /**
6755  * lpfc_get_stats - Return statistical information about the adapter
6756  * @shost: kernel scsi host pointer.
6757  *
6758  * Notes:
6759  * NULL on error: link not yet configured, no mbox pool, SLI not active,
6760  * management I/O blocked, memory allocation error, or mbox error.
6761  *
6762  * Returns:
6763  * NULL for error
6764  * address of the adapter host statistics
6765  **/
6766 static struct fc_host_statistics *
6767 lpfc_get_stats(struct Scsi_Host *shost)
6768 {
6769 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6770 	struct lpfc_hba   *phba = vport->phba;
6771 	struct lpfc_sli   *psli = &phba->sli;
6772 	struct fc_host_statistics *hs = &phba->link_stats;
6773 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6774 	LPFC_MBOXQ_t *pmboxq;
6775 	MAILBOX_t *pmb;
6776 	int rc = 0;
6777 
6778 	/*
6779 	 * prevent udev from issuing mailbox commands until the port is
6780 	 * configured.
6781 	 */
6782 	if (phba->link_state < LPFC_LINK_DOWN ||
6783 	    !phba->mbox_mem_pool ||
6784 	    (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6785 		return NULL;
6786 
6787 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6788 		return NULL;
6789 
6790 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6791 	if (!pmboxq)
6792 		return NULL;
6793 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6794 
6795 	pmb = &pmboxq->u.mb;
6796 	pmb->mbxCommand = MBX_READ_STATUS;
6797 	pmb->mbxOwner = OWN_HOST;
6798 	pmboxq->ctx_buf = NULL;
6799 	pmboxq->vport = vport;
6800 
6801 	if (vport->fc_flag & FC_OFFLINE_MODE) {
6802 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6803 		if (rc != MBX_SUCCESS) {
6804 			mempool_free(pmboxq, phba->mbox_mem_pool);
6805 			return NULL;
6806 		}
6807 	} else {
6808 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6809 		if (rc != MBX_SUCCESS) {
6810 			if (rc != MBX_TIMEOUT)
6811 				mempool_free(pmboxq, phba->mbox_mem_pool);
6812 			return NULL;
6813 		}
6814 	}
6815 
6816 	memset(hs, 0, sizeof (struct fc_host_statistics));
6817 
6818 	hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6819 	/*
6820 	 * MBX_READ_STATUS returns byte counts in units of Kbytes, which
6821 	 * have to be converted to 4-byte words (1 Kbyte = 256 words).
6822 	 */
6823 	hs->tx_words = (uint64_t)
6824 			((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6825 			* (uint64_t)256);
6826 	hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6827 	hs->rx_words = (uint64_t)
6828 			((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6829 			 * (uint64_t)256);
6830 
6831 	memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6832 	pmb->mbxCommand = MBX_READ_LNK_STAT;
6833 	pmb->mbxOwner = OWN_HOST;
6834 	pmboxq->ctx_buf = NULL;
6835 	pmboxq->vport = vport;
6836 
6837 	if (vport->fc_flag & FC_OFFLINE_MODE) {
6838 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6839 		if (rc != MBX_SUCCESS) {
6840 			mempool_free(pmboxq, phba->mbox_mem_pool);
6841 			return NULL;
6842 		}
6843 	} else {
6844 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6845 		if (rc != MBX_SUCCESS) {
6846 			if (rc != MBX_TIMEOUT)
6847 				mempool_free(pmboxq, phba->mbox_mem_pool);
6848 			return NULL;
6849 		}
6850 	}
6851 
6852 	hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6853 	hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6854 	hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6855 	hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6856 	hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6857 	hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6858 	hs->error_frames = pmb->un.varRdLnk.crcCnt;
6859 
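	/* Report counters as deltas from the baseline saved by lpfc_reset_stats() */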
6860 	hs->link_failure_count -= lso->link_failure_count;
6861 	hs->loss_of_sync_count -= lso->loss_of_sync_count;
6862 	hs->loss_of_signal_count -= lso->loss_of_signal_count;
6863 	hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6864 	hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6865 	hs->invalid_crc_count -= lso->invalid_crc_count;
6866 	hs->error_frames -= lso->error_frames;
6867 
6868 	if (phba->hba_flag & HBA_FCOE_MODE) {
6869 		hs->lip_count = -1;
6870 		hs->nos_count = (phba->link_events >> 1);
6871 		hs->nos_count -= lso->link_events;
6872 	} else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6873 		hs->lip_count = (phba->fc_eventTag >> 1);
6874 		hs->lip_count -= lso->link_events;
6875 		hs->nos_count = -1;
6876 	} else {
6877 		hs->lip_count = -1;
6878 		hs->nos_count = (phba->fc_eventTag >> 1);
6879 		hs->nos_count -= lso->link_events;
6880 	}
6881 
6882 	hs->dumped_frames = -1;
6883 
6884 	hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6885 
6886 	mempool_free(pmboxq, phba->mbox_mem_pool);
6887 
6888 	return hs;
6889 }
6890 
6891 /**
6892  * lpfc_reset_stats - Reset the adapter statistics baseline
6893  * @shost: kernel scsi host pointer.
6894  **/
6895 static void
6896 lpfc_reset_stats(struct Scsi_Host *shost)
6897 {
6898 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6899 	struct lpfc_hba   *phba = vport->phba;
6900 	struct lpfc_sli   *psli = &phba->sli;
6901 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6902 	LPFC_MBOXQ_t *pmboxq;
6903 	MAILBOX_t *pmb;
6904 	int rc = 0;
6905 
6906 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6907 		return;
6908 
6909 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6910 	if (!pmboxq)
6911 		return;
6912 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6913 
6914 	pmb = &pmboxq->u.mb;
6915 	pmb->mbxCommand = MBX_READ_STATUS;
6916 	pmb->mbxOwner = OWN_HOST;
6917 	pmb->un.varWords[0] = 0x1; /* reset request */
6918 	pmboxq->ctx_buf = NULL;
6919 	pmboxq->vport = vport;
6920 
6921 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6922 		(!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6923 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6924 		if (rc != MBX_SUCCESS) {
6925 			mempool_free(pmboxq, phba->mbox_mem_pool);
6926 			return;
6927 		}
6928 	} else {
6929 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6930 		if (rc != MBX_SUCCESS) {
6931 			if (rc != MBX_TIMEOUT)
6932 				mempool_free(pmboxq, phba->mbox_mem_pool);
6933 			return;
6934 		}
6935 	}
6936 
6937 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6938 	pmb->mbxCommand = MBX_READ_LNK_STAT;
6939 	pmb->mbxOwner = OWN_HOST;
6940 	pmboxq->ctx_buf = NULL;
6941 	pmboxq->vport = vport;
6942 
6943 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6944 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6945 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6946 		if (rc != MBX_SUCCESS) {
6947 			mempool_free(pmboxq, phba->mbox_mem_pool);
6948 			return;
6949 		}
6950 	} else {
6951 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6952 		if (rc != MBX_SUCCESS) {
6953 			if (rc != MBX_TIMEOUT)
6954 				mempool_free(pmboxq, phba->mbox_mem_pool);
6955 			return;
6956 		}
6957 	}
6958 
6959 	lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6960 	lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6961 	lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6962 	lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6963 	lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6964 	lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6965 	lso->error_frames = pmb->un.varRdLnk.crcCnt;
6966 	if (phba->hba_flag & HBA_FCOE_MODE)
6967 		lso->link_events = (phba->link_events >> 1);
6968 	else
6969 		lso->link_events = (phba->fc_eventTag >> 1);
6970 
6971 	psli->stats_start = ktime_get_seconds();
6972 
6973 	mempool_free(pmboxq, phba->mbox_mem_pool);
6974 
6975 	return;
6976 }
6977 
6978 /*
6979  * The LPFC driver treats linkdown handling as target loss events so there
6980  * are no sysfs handlers for link_down_tmo.
6981  */
6982 
6983 /**
6984  * lpfc_get_node_by_target - Return the nodelist for a target
6985  * @starget: kernel scsi target pointer.
6986  *
6987  * Returns:
6988  * address of the node list if found
6989  * NULL target not found
6990  **/
6991 static struct lpfc_nodelist *
6992 lpfc_get_node_by_target(struct scsi_target *starget)
6993 {
6994 	struct Scsi_Host  *shost = dev_to_shost(starget->dev.parent);
6995 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6996 	struct lpfc_nodelist *ndlp;
6997 
6998 	spin_lock_irq(shost->host_lock);
6999 	/* Search for this mapped target ID */
7000 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7001 		if (NLP_CHK_NODE_ACT(ndlp) &&
7002 		    ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7003 		    starget->id == ndlp->nlp_sid) {
7004 			spin_unlock_irq(shost->host_lock);
7005 			return ndlp;
7006 		}
7007 	}
7008 	spin_unlock_irq(shost->host_lock);
7009 	return NULL;
7010 }
7011 
7012 /**
7013  * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7014  * @starget: kernel scsi target pointer.
7015  **/
7016 static void
7017 lpfc_get_starget_port_id(struct scsi_target *starget)
7018 {
7019 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7020 
7021 	fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7022 }
7023 
7024 /**
7025  * lpfc_get_starget_node_name - Set the target node name
7026  * @starget: kernel scsi target pointer.
7027  *
7028  * Description: Set the target node name to the ndlp node name wwn or zero.
7029  **/
7030 static void
7031 lpfc_get_starget_node_name(struct scsi_target *starget)
7032 {
7033 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7034 
7035 	fc_starget_node_name(starget) =
7036 		ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7037 }
7038 
7039 /**
7040  * lpfc_get_starget_port_name - Set the target port name
7041  * @starget: kernel scsi target pointer.
7042  *
7043  * Description: Set the target port name to the ndlp port name wwn or zero.
7044  **/
7045 static void
7046 lpfc_get_starget_port_name(struct scsi_target *starget)
7047 {
7048 	struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7049 
7050 	fc_starget_port_name(starget) =
7051 		ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7052 }
7053 
7054 /**
7055  * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7056  * @rport: fc rport address.
7057  * @timeout: new value for dev loss tmo.
7058  *
7059  * Description:
7060  * If timeout is non-zero, set the dev_loss_tmo to timeout; otherwise set
7061  * dev_loss_tmo to one.
7062  **/
7063 static void
7064 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7065 {
7066 	struct lpfc_rport_data *rdata = rport->dd_data;
7067 	struct lpfc_nodelist *ndlp = rdata->pnode;
7068 #if (IS_ENABLED(CONFIG_NVME_FC))
7069 	struct lpfc_nvme_rport *nrport = NULL;
7070 #endif
7071 
7072 	if (timeout)
7073 		rport->dev_loss_tmo = timeout;
7074 	else
7075 		rport->dev_loss_tmo = 1;
7076 
7077 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
7078 		dev_info(&rport->dev, "Cannot find remote node to "
7079 				      "set rport dev loss tmo, port_id x%x\n",
7080 				      rport->port_id);
7081 		return;
7082 	}
7083 
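	/* Mirror the new dev_loss_tmo onto the NVMe remoteport, if one exists */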
7084 #if (IS_ENABLED(CONFIG_NVME_FC))
7085 	nrport = lpfc_ndlp_get_nrport(ndlp);
7086 
7087 	if (nrport && nrport->remoteport)
7088 		nvme_fc_set_remoteport_devloss(nrport->remoteport,
7089 					       rport->dev_loss_tmo);
7090 #endif
7091 }
7092 
7093 /**
7094  * lpfc_rport_show_function - Return rport target information
7095  *
7096  * Description:
7097  * Macro that uses @field to generate a show function named
7098  * lpfc_show_rport_<field>.
7099  * lpfc_show_rport_##field: returns the bytes formatted in buf
7100  * @cdev: class converted to an fc_rport.
7101  * @buf: on return contains the target_field or zero.
7102  *
7103  * Returns: size of formatted string.
7104  **/
7105 #define lpfc_rport_show_function(field, format_string, sz, cast)	\
7106 static ssize_t								\
7107 lpfc_show_rport_##field (struct device *dev,				\
7108 			 struct device_attribute *attr,			\
7109 			 char *buf)					\
7110 {									\
7111 	struct fc_rport *rport = transport_class_to_rport(dev);		\
7112 	struct lpfc_rport_data *rdata = rport->hostdata;		\
7113 	return scnprintf(buf, sz, format_string,			\
7114 		(rdata->target) ? cast rdata->target->field : 0);	\
7115 }
7116 
7117 #define lpfc_rport_rd_attr(field, format_string, sz)			\
7118 	lpfc_rport_show_function(field, format_string, sz, )		\
7119 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
7120 
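/*
 * Illustration only (the field name below is hypothetical, not an attribute
 * this driver necessarily defines): lpfc_rport_rd_attr(example_field, "%d\n", 20)
 * would emit a lpfc_show_rport_example_field() routine plus a read-only FC
 * rport attribute bound to it through FC_RPORT_ATTR().
 */
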
7121 /**
7122  * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7123  * @fc_vport: The fc_vport whose symbolic name has been changed.
7124  *
7125  * Description:
7126  * This function is called by the transport after the @fc_vport's symbolic name
7127  * has been changed. This function re-registers the symbolic name with the
7128  * switch to propagate the change into the fabric if the vport is active.
7129  **/
7130 static void
7131 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7132 {
7133 	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7134 
7135 	if (vport->port_state == LPFC_VPORT_READY)
7136 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7137 }
7138 
7139 /**
7140  * lpfc_hba_log_verbose_init - Set hba's log verbose level
7141  * @phba: Pointer to lpfc_hba struct.
7142  * @verbose: verbose level to set.
7143  *
7144  * Called by lpfc_get_cfgparam() to copy the module parameter
7145  * lpfc_log_verbose into @phba->cfg_log_verbose so that log messages
7146  * can be filtered by verbosity before any HBA port or vport is created.
7147  **/
7148 static void
7149 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7150 {
7151 	phba->cfg_log_verbose = verbose;
7152 }
7153 
7154 struct fc_function_template lpfc_transport_functions = {
7155 	/* fixed attributes the driver supports */
7156 	.show_host_node_name = 1,
7157 	.show_host_port_name = 1,
7158 	.show_host_supported_classes = 1,
7159 	.show_host_supported_fc4s = 1,
7160 	.show_host_supported_speeds = 1,
7161 	.show_host_maxframe_size = 1,
7162 
7163 	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
7164 	.show_host_symbolic_name = 1,
7165 
7166 	/* dynamic attributes the driver supports */
7167 	.get_host_port_id = lpfc_get_host_port_id,
7168 	.show_host_port_id = 1,
7169 
7170 	.get_host_port_type = lpfc_get_host_port_type,
7171 	.show_host_port_type = 1,
7172 
7173 	.get_host_port_state = lpfc_get_host_port_state,
7174 	.show_host_port_state = 1,
7175 
7176 	/* active_fc4s is shown but doesn't change (thus no get function) */
7177 	.show_host_active_fc4s = 1,
7178 
7179 	.get_host_speed = lpfc_get_host_speed,
7180 	.show_host_speed = 1,
7181 
7182 	.get_host_fabric_name = lpfc_get_host_fabric_name,
7183 	.show_host_fabric_name = 1,
7184 
7185 	/*
7186 	 * The LPFC driver treats linkdown handling as target loss events
7187 	 * so there are no sysfs handlers for link_down_tmo.
7188 	 */
7189 
7190 	.get_fc_host_stats = lpfc_get_stats,
7191 	.reset_fc_host_stats = lpfc_reset_stats,
7192 
7193 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
7194 	.show_rport_maxframe_size = 1,
7195 	.show_rport_supported_classes = 1,
7196 
7197 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7198 	.show_rport_dev_loss_tmo = 1,
7199 
7200 	.get_starget_port_id  = lpfc_get_starget_port_id,
7201 	.show_starget_port_id = 1,
7202 
7203 	.get_starget_node_name = lpfc_get_starget_node_name,
7204 	.show_starget_node_name = 1,
7205 
7206 	.get_starget_port_name = lpfc_get_starget_port_name,
7207 	.show_starget_port_name = 1,
7208 
7209 	.issue_fc_host_lip = lpfc_issue_lip,
7210 	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7211 	.terminate_rport_io = lpfc_terminate_rport_io,
7212 
7213 	.dd_fcvport_size = sizeof(struct lpfc_vport *),
7214 
7215 	.vport_disable = lpfc_vport_disable,
7216 
7217 	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7218 
7219 	.bsg_request = lpfc_bsg_request,
7220 	.bsg_timeout = lpfc_bsg_timeout,
7221 };
7222 
7223 struct fc_function_template lpfc_vport_transport_functions = {
7224 	/* fixed attributes the driver supports */
7225 	.show_host_node_name = 1,
7226 	.show_host_port_name = 1,
7227 	.show_host_supported_classes = 1,
7228 	.show_host_supported_fc4s = 1,
7229 	.show_host_supported_speeds = 1,
7230 	.show_host_maxframe_size = 1,
7231 
7232 	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
7233 	.show_host_symbolic_name = 1,
7234 
7235 	/* dynamic attributes the driver supports */
7236 	.get_host_port_id = lpfc_get_host_port_id,
7237 	.show_host_port_id = 1,
7238 
7239 	.get_host_port_type = lpfc_get_host_port_type,
7240 	.show_host_port_type = 1,
7241 
7242 	.get_host_port_state = lpfc_get_host_port_state,
7243 	.show_host_port_state = 1,
7244 
7245 	/* active_fc4s is shown but doesn't change (thus no get function) */
7246 	.show_host_active_fc4s = 1,
7247 
7248 	.get_host_speed = lpfc_get_host_speed,
7249 	.show_host_speed = 1,
7250 
7251 	.get_host_fabric_name = lpfc_get_host_fabric_name,
7252 	.show_host_fabric_name = 1,
7253 
7254 	/*
7255 	 * The LPFC driver treats linkdown handling as target loss events
7256 	 * so there are no sysfs handlers for link_down_tmo.
7257 	 */
7258 
7259 	.get_fc_host_stats = lpfc_get_stats,
7260 	.reset_fc_host_stats = lpfc_reset_stats,
7261 
7262 	.dd_fcrport_size = sizeof(struct lpfc_rport_data),
7263 	.show_rport_maxframe_size = 1,
7264 	.show_rport_supported_classes = 1,
7265 
7266 	.set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7267 	.show_rport_dev_loss_tmo = 1,
7268 
7269 	.get_starget_port_id  = lpfc_get_starget_port_id,
7270 	.show_starget_port_id = 1,
7271 
7272 	.get_starget_node_name = lpfc_get_starget_node_name,
7273 	.show_starget_node_name = 1,
7274 
7275 	.get_starget_port_name = lpfc_get_starget_port_name,
7276 	.show_starget_port_name = 1,
7277 
7278 	.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7279 	.terminate_rport_io = lpfc_terminate_rport_io,
7280 
7281 	.vport_disable = lpfc_vport_disable,
7282 
7283 	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7284 };
7285 
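/*
 * A sketch of how these templates are typically registered; the call sites
 * shown are assumptions (they live in the driver's init path, e.g.
 * lpfc_init.c), and only the template definitions above are authoritative:
 *
 *	lpfc_transport_template =
 *		fc_attach_transport(&lpfc_transport_functions);
 *	lpfc_vport_transport_template =
 *		fc_attach_transport(&lpfc_vport_transport_functions);
 */
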
7286 /**
7287  * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
7288  * Mode
7289  * @phba: lpfc_hba pointer.
7290  **/
7291 static void
7292 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7293 {
7294 	/* If the adapter supports FCoE mode */
7295 	switch (phba->pcidev->device) {
7296 	case PCI_DEVICE_ID_SKYHAWK:
7297 	case PCI_DEVICE_ID_SKYHAWK_VF:
7298 	case PCI_DEVICE_ID_LANCER_FCOE:
7299 	case PCI_DEVICE_ID_LANCER_FCOE_VF:
7300 	case PCI_DEVICE_ID_ZEPHYR_DCSP:
7301 	case PCI_DEVICE_ID_HORNET:
7302 	case PCI_DEVICE_ID_TIGERSHARK:
7303 	case PCI_DEVICE_ID_TOMCAT:
7304 		phba->hba_flag |= HBA_FCOE_MODE;
7305 		break;
7306 	default:
7307 	/* for others, clear the flag */
7308 		phba->hba_flag &= ~HBA_FCOE_MODE;
7309 	}
7310 }
7311 
7312 /**
7313  * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7314  * @phba: lpfc_hba pointer.
7315  **/
7316 void
7317 lpfc_get_cfgparam(struct lpfc_hba *phba)
7318 {
7319 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7320 	lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7321 	lpfc_ns_query_init(phba, lpfc_ns_query);
7322 	lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7323 	lpfc_cr_delay_init(phba, lpfc_cr_delay);
7324 	lpfc_cr_count_init(phba, lpfc_cr_count);
7325 	lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7326 	lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7327 	lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7328 	lpfc_ack0_init(phba, lpfc_ack0);
7329 	lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7330 	lpfc_topology_init(phba, lpfc_topology);
7331 	lpfc_link_speed_init(phba, lpfc_link_speed);
7332 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7333 	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7334 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7335 	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7336 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7337 	lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7338 	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7339 	lpfc_use_msi_init(phba, lpfc_use_msi);
7340 	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7341 	lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7342 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7343 	lpfc_force_rscn_init(phba, lpfc_force_rscn);
7344 	lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7345 	lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7346 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7347 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7348 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7349 
7350 	lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7351 	if (phba->sli_rev != LPFC_SLI_REV4)
7352 		phba->cfg_EnableXLane = 0;
7353 	lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7354 
7355 	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7356 	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7357 	phba->cfg_oas_lun_state = 0;
7358 	phba->cfg_oas_lun_status = 0;
7359 	phba->cfg_oas_flags = 0;
7360 	phba->cfg_oas_priority = 0;
7361 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
7362 	lpfc_prot_mask_init(phba, lpfc_prot_mask);
7363 	lpfc_prot_guard_init(phba, lpfc_prot_guard);
7364 	if (phba->sli_rev == LPFC_SLI_REV4)
7365 		phba->cfg_poll = 0;
7366 	else
7367 		phba->cfg_poll = lpfc_poll;
7368 
7369 	/* Get the function mode */
7370 	lpfc_get_hba_function_mode(phba);
7371 
7372 	/* BlockGuard allowed for FC only. */
7373 	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7374 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7375 				"0581 BlockGuard feature not supported\n");
7376 		/* If set, clear the BlockGuard support param */
7377 		phba->cfg_enable_bg = 0;
7378 	} else if (phba->cfg_enable_bg) {
7379 		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7380 	}
7381 
7382 	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7383 
7384 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7385 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7386 	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7387 
7388 	/* Initialize first burst. Target vs Initiator are different. */
7389 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7390 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7391 	lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7392 	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7393 	lpfc_irq_chann_init(phba, lpfc_irq_chann);
7394 	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7395 	lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7396 
7397 	if (phba->sli_rev != LPFC_SLI_REV4) {
7398 		/* NVME only supported on SLI4 */
7399 		phba->nvmet_support = 0;
7400 		phba->cfg_nvmet_mrq = 0;
7401 		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7402 		phba->cfg_enable_bbcr = 0;
7403 		phba->cfg_xri_rebalancing = 0;
7404 	} else {
7405 		/* We MUST have FCP support */
7406 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7407 			phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7408 	}
7409 
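	/* Auto-adjust the interrupt rate only when the user did not force fcp_imax */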
7410 	phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7411 
7412 	phba->cfg_enable_pbde = 0;
7413 
7414 	/* A value of 0 means use the number of CPUs found in the system */
7415 	if (phba->cfg_hdw_queue == 0)
7416 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7417 	if (phba->cfg_irq_chann == 0)
7418 		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7419 	if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7420 		phba->cfg_irq_chann = phba->cfg_hdw_queue;
7421 
7422 	phba->cfg_soft_wwnn = 0L;
7423 	phba->cfg_soft_wwpn = 0L;
7424 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7425 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7426 	lpfc_aer_support_init(phba, lpfc_aer_support);
7427 	lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7428 	lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7429 	lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7430 	lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7431 	lpfc_sli_mode_init(phba, lpfc_sli_mode);
7432 	lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7433 	lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7434 	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7435 	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7436 
7437 	return;
7438 }
7439 
7440 /**
7441  * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7442  * dependencies between protocols and roles.
7443  * @phba: lpfc_hba pointer.
7444  **/
7445 void
7446 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7447 {
7448 	int  logit = 0;
7449 
7450 	if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7451 		phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7452 		logit = 1;
7453 	}
7454 	if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7455 		phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7456 		logit = 1;
7457 	}
7458 	if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7459 		phba->cfg_irq_chann = phba->cfg_hdw_queue;
7460 		logit = 1;
7461 	}
7462 	if (logit)
7463 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7464 				"2006 Reducing Queues - CPU limitation: "
7465 				"IRQ %d HDWQ %d\n",
7466 				phba->cfg_irq_chann,
7467 				phba->cfg_hdw_queue);
7468 
7469 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7470 	    phba->nvmet_support) {
7471 		phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7472 
7473 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7474 				"6013 %s x%x fb_size x%x, fb_max x%x\n",
7475 				"NVME Target PRLI ACC enable_fb ",
7476 				phba->cfg_nvme_enable_fb,
7477 				phba->cfg_nvmet_fb_size,
7478 				LPFC_NVMET_FB_SZ_MAX);
7479 
7480 		if (phba->cfg_nvme_enable_fb == 0)
7481 			phba->cfg_nvmet_fb_size = 0;
7482 		else {
7483 			if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7484 				phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7485 		}
7486 
7487 		if (!phba->cfg_nvmet_mrq)
7488 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7489 
7490 		/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7491 		if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7492 			phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7493 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7494 					"6018 Adjust lpfc_nvmet_mrq to %d\n",
7495 					phba->cfg_nvmet_mrq);
7496 		}
7497 		if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7498 			phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7499 
7500 	} else {
7501 		/* Not NVME Target mode.  Turn off Target parameters. */
7502 		phba->nvmet_support = 0;
7503 		phba->cfg_nvmet_mrq = 0;
7504 		phba->cfg_nvmet_fb_size = 0;
7505 	}
7506 }
7507 
7508 /**
7509  * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7510  * @vport: lpfc_vport pointer.
7511  **/
7512 void
7513 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7514 {
7515 	lpfc_log_verbose_init(vport, lpfc_log_verbose);
7516 	lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7517 	lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7518 	lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7519 	lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7520 	lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7521 	lpfc_restrict_login_init(vport, lpfc_restrict_login);
7522 	lpfc_fcp_class_init(vport, lpfc_fcp_class);
7523 	lpfc_use_adisc_init(vport, lpfc_use_adisc);
7524 	lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7525 	lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7526 	lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7527 	lpfc_max_luns_init(vport, lpfc_max_luns);
7528 	lpfc_scan_down_init(vport, lpfc_scan_down);
7529 	lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7530 	return;
7531 }
7532