1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13
14 static int qla24xx_vport_disable(struct fc_vport *, bool);
15
16 /* SYSFS attributes --------------------------------------------------------- */
17
18 static ssize_t
19 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20 struct bin_attribute *bin_attr,
21 char *buf, loff_t off, size_t count)
22 {
23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 struct device, kobj)));
25 struct qla_hw_data *ha = vha->hw;
26 int rval = 0;
27
28 if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
29 ha->mpi_fw_dump_reading))
30 return 0;
31
32 mutex_lock(&ha->optrom_mutex);
33 if (IS_P3P_TYPE(ha)) {
34 if (off < ha->md_template_size) {
35 rval = memory_read_from_buffer(buf, count,
36 &off, ha->md_tmplt_hdr, ha->md_template_size);
37 } else {
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
41 }
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
46 rval = memory_read_from_buffer(buf, count, &off,
47 ha->mpi_fw_dump,
48 ha->mpi_fw_dump_len);
49 } else if (ha->fw_dump_reading) {
50 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
51 ha->fw_dump_len);
52 } else {
53 rval = 0;
54 }
55 mutex_unlock(&ha->optrom_mutex);
56 return rval;
57 }
58
59 static ssize_t
60 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
61 struct bin_attribute *bin_attr,
62 char *buf, loff_t off, size_t count)
63 {
64 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
65 struct device, kobj)));
66 struct qla_hw_data *ha = vha->hw;
67 int reading;
68
69 if (off != 0)
70 return 0;
71
72 reading = simple_strtol(buf, NULL, 10);
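/*
 * Control values handled below:
 *   0 - clear/release a completed firmware dump
 *   1 - expose an existing firmware dump for reading
 *   2 - (re)allocate the firmware dump buffer
 *   3 - force a firmware/system error (reset-owner path on P3P parts)
 *   4 - report whether MiniDump is supported (P3P only)
 *   5 - schedule an ISP abort (P3P only)
 *   6/7 - clear / expose the MCTP dump
 *   8/9 - clear / expose the MPI firmware dump
 *  10 - trigger an MPI firmware dump (ISP27xx/ISP28xx)
 */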
73 switch (reading) {
74 case 0:
75 if (!ha->fw_dump_reading)
76 break;
77
78 ql_log(ql_log_info, vha, 0x705d,
79 "Firmware dump cleared on (%ld).\n", vha->host_no);
80
81 if (IS_P3P_TYPE(ha)) {
82 qla82xx_md_free(vha);
83 qla82xx_md_prep(vha);
84 }
85 ha->fw_dump_reading = 0;
86 ha->fw_dumped = false;
87 break;
88 case 1:
89 if (ha->fw_dumped && !ha->fw_dump_reading) {
90 ha->fw_dump_reading = 1;
91
92 ql_log(ql_log_info, vha, 0x705e,
93 "Raw firmware dump ready for read on (%ld).\n",
94 vha->host_no);
95 }
96 break;
97 case 2:
98 qla2x00_alloc_fw_dump(vha);
99 break;
100 case 3:
101 if (IS_QLA82XX(ha)) {
102 qla82xx_idc_lock(ha);
103 qla82xx_set_reset_owner(vha);
104 qla82xx_idc_unlock(ha);
105 } else if (IS_QLA8044(ha)) {
106 qla8044_idc_lock(ha);
107 qla82xx_set_reset_owner(vha);
108 qla8044_idc_unlock(ha);
109 } else {
110 qla2x00_system_error(vha);
111 }
112 break;
113 case 4:
114 if (IS_P3P_TYPE(ha)) {
115 if (ha->md_tmplt_hdr)
116 ql_dbg(ql_dbg_user, vha, 0x705b,
117 "MiniDump supported with this firmware.\n");
118 else
119 ql_dbg(ql_dbg_user, vha, 0x709d,
120 "MiniDump not supported with this firmware.\n");
121 }
122 break;
123 case 5:
124 if (IS_P3P_TYPE(ha))
125 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
126 break;
127 case 6:
128 if (!ha->mctp_dump_reading)
129 break;
130 ql_log(ql_log_info, vha, 0x70c1,
131 "MCTP dump cleared on (%ld).\n", vha->host_no);
132 ha->mctp_dump_reading = 0;
133 ha->mctp_dumped = 0;
134 break;
135 case 7:
136 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
137 ha->mctp_dump_reading = 1;
138 ql_log(ql_log_info, vha, 0x70c2,
139 "Raw mctp dump ready for read on (%ld).\n",
140 vha->host_no);
141 }
142 break;
143 case 8:
144 if (!ha->mpi_fw_dump_reading)
145 break;
146 ql_log(ql_log_info, vha, 0x70e7,
147 "MPI firmware dump cleared on (%ld).\n", vha->host_no);
148 ha->mpi_fw_dump_reading = 0;
149 ha->mpi_fw_dumped = 0;
150 break;
151 case 9:
152 if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
153 ha->mpi_fw_dump_reading = 1;
154 ql_log(ql_log_info, vha, 0x70e8,
155 "Raw MPI firmware dump ready for read on (%ld).\n",
156 vha->host_no);
157 }
158 break;
159 case 10:
160 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
161 ql_log(ql_log_info, vha, 0x70e9,
162 "Issuing MPI firmware dump on host#%ld.\n",
163 vha->host_no);
164 ha->isp_ops->mpi_fw_dump(vha, 0);
165 }
166 break;
167 }
168 return count;
169 }
170
171 static struct bin_attribute sysfs_fw_dump_attr = {
172 .attr = {
173 .name = "fw_dump",
174 .mode = S_IRUSR | S_IWUSR,
175 },
176 .size = 0,
177 .read = qla2x00_sysfs_read_fw_dump,
178 .write = qla2x00_sysfs_write_fw_dump,
179 };
180
181 static ssize_t
182 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
183 struct bin_attribute *bin_attr,
184 char *buf, loff_t off, size_t count)
185 {
186 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
187 struct device, kobj)));
188 struct qla_hw_data *ha = vha->hw;
189 uint32_t faddr;
190 struct active_regions active_regions = { };
191
192 if (!capable(CAP_SYS_ADMIN))
193 return 0;
194
195 mutex_lock(&ha->optrom_mutex);
196 if (qla2x00_chip_is_down(vha)) {
197 mutex_unlock(&ha->optrom_mutex);
198 return -EAGAIN;
199 }
200
201 if (!IS_NOCACHE_VPD_TYPE(ha)) {
202 mutex_unlock(&ha->optrom_mutex);
203 goto skip;
204 }
205
206 faddr = ha->flt_region_nvram;
207 if (IS_QLA28XX(ha)) {
208 qla28xx_get_aux_images(vha, &active_regions);
209 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
210 faddr = ha->flt_region_nvram_sec;
211 }
212 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
213
214 mutex_unlock(&ha->optrom_mutex);
215
216 skip:
217 return memory_read_from_buffer(buf, count, &off, ha->nvram,
218 ha->nvram_size);
219 }
220
221 static ssize_t
222 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
223 struct bin_attribute *bin_attr,
224 char *buf, loff_t off, size_t count)
225 {
226 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
227 struct device, kobj)));
228 struct qla_hw_data *ha = vha->hw;
229 uint16_t cnt;
230
231 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
232 !ha->isp_ops->write_nvram)
233 return -EINVAL;
234
235 /* Checksum NVRAM. */
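/*
 * The last word (or byte) of the image is replaced with the two's
 * complement of the running sum so the entire NVRAM image sums to zero.
 */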
236 if (IS_FWI2_CAPABLE(ha)) {
237 __le32 *iter = (__force __le32 *)buf;
238 uint32_t chksum;
239
240 chksum = 0;
241 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
242 chksum += le32_to_cpu(*iter);
243 chksum = ~chksum + 1;
244 *iter = cpu_to_le32(chksum);
245 } else {
246 uint8_t *iter;
247 uint8_t chksum;
248
249 iter = (uint8_t *)buf;
250 chksum = 0;
251 for (cnt = 0; cnt < count - 1; cnt++)
252 chksum += *iter++;
253 chksum = ~chksum + 1;
254 *iter = chksum;
255 }
256
257 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
258 ql_log(ql_log_warn, vha, 0x705f,
259 "HBA not online, failing NVRAM update.\n");
260 return -EAGAIN;
261 }
262
263 mutex_lock(&ha->optrom_mutex);
264 if (qla2x00_chip_is_down(vha)) {
265 mutex_unlock(&ha->optrom_mutex);
266 return -EAGAIN;
267 }
268
269 /* Write NVRAM. */
270 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
271 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
272 count);
273 mutex_unlock(&ha->optrom_mutex);
274
275 ql_dbg(ql_dbg_user, vha, 0x7060,
276 "Setting ISP_ABORT_NEEDED\n");
277 /* NVRAM settings take effect immediately. */
278 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
279 qla2xxx_wake_dpc(vha);
280 qla2x00_wait_for_chip_reset(vha);
281
282 return count;
283 }
284
285 static struct bin_attribute sysfs_nvram_attr = {
286 .attr = {
287 .name = "nvram",
288 .mode = S_IRUSR | S_IWUSR,
289 },
290 .size = 512,
291 .read = qla2x00_sysfs_read_nvram,
292 .write = qla2x00_sysfs_write_nvram,
293 };
294
295 static ssize_t
296 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
297 struct bin_attribute *bin_attr,
298 char *buf, loff_t off, size_t count)
299 {
300 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
301 struct device, kobj)));
302 struct qla_hw_data *ha = vha->hw;
303 ssize_t rval = 0;
304
305 mutex_lock(&ha->optrom_mutex);
306
307 if (ha->optrom_state != QLA_SREADING)
308 goto out;
309
310 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
311 ha->optrom_region_size);
312
313 out:
314 mutex_unlock(&ha->optrom_mutex);
315
316 return rval;
317 }
318
319 static ssize_t
320 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
321 struct bin_attribute *bin_attr,
322 char *buf, loff_t off, size_t count)
323 {
324 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
325 struct device, kobj)));
326 struct qla_hw_data *ha = vha->hw;
327
328 mutex_lock(&ha->optrom_mutex);
329
330 if (ha->optrom_state != QLA_SWRITING) {
331 mutex_unlock(&ha->optrom_mutex);
332 return -EINVAL;
333 }
334 if (off > ha->optrom_region_size) {
335 mutex_unlock(&ha->optrom_mutex);
336 return -ERANGE;
337 }
338 if (off + count > ha->optrom_region_size)
339 count = ha->optrom_region_size - off;
340
341 memcpy(&ha->optrom_buffer[off], buf, count);
342 mutex_unlock(&ha->optrom_mutex);
343
344 return count;
345 }
346
347 static struct bin_attribute sysfs_optrom_attr = {
348 .attr = {
349 .name = "optrom",
350 .mode = S_IRUSR | S_IWUSR,
351 },
352 .size = 0,
353 .read = qla2x00_sysfs_read_optrom,
354 .write = qla2x00_sysfs_write_optrom,
355 };
356
357 static ssize_t
358 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
359 struct bin_attribute *bin_attr,
360 char *buf, loff_t off, size_t count)
361 {
362 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
363 struct device, kobj)));
364 struct qla_hw_data *ha = vha->hw;
365 uint32_t start = 0;
366 uint32_t size = ha->optrom_size;
367 int val, valid;
368 ssize_t rval = count;
369
370 if (off)
371 return -EINVAL;
372
373 if (unlikely(pci_channel_offline(ha->pdev)))
374 return -EAGAIN;
375
376 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
377 return -EINVAL;
378 if (start > ha->optrom_size)
379 return -EINVAL;
380 if (size > ha->optrom_size - start)
381 size = ha->optrom_size - start;
382
383 mutex_lock(&ha->optrom_mutex);
384 if (qla2x00_chip_is_down(vha)) {
385 mutex_unlock(&ha->optrom_mutex);
386 return -EAGAIN;
387 }
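/*
 * optrom_ctl commands:
 *   0 - free the staging buffer and return to the waiting state
 *   1 - allocate a buffer and read the requested flash region into it
 *   2 - allocate a buffer to stage a flash write (data arrives via the
 *       "optrom" attribute)
 *   3 - burn the staged buffer to flash
 */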
388 switch (val) {
389 case 0:
390 if (ha->optrom_state != QLA_SREADING &&
391 ha->optrom_state != QLA_SWRITING) {
392 rval = -EINVAL;
393 goto out;
394 }
395 ha->optrom_state = QLA_SWAITING;
396
397 ql_dbg(ql_dbg_user, vha, 0x7061,
398 "Freeing flash region allocation -- 0x%x bytes.\n",
399 ha->optrom_region_size);
400
401 vfree(ha->optrom_buffer);
402 ha->optrom_buffer = NULL;
403 break;
404 case 1:
405 if (ha->optrom_state != QLA_SWAITING) {
406 rval = -EINVAL;
407 goto out;
408 }
409
410 ha->optrom_region_start = start;
411 ha->optrom_region_size = size;
412
413 ha->optrom_state = QLA_SREADING;
414 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
415 if (ha->optrom_buffer == NULL) {
416 ql_log(ql_log_warn, vha, 0x7062,
417 "Unable to allocate memory for optrom retrieval "
418 "(%x).\n", ha->optrom_region_size);
419
420 ha->optrom_state = QLA_SWAITING;
421 rval = -ENOMEM;
422 goto out;
423 }
424
425 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
426 ql_log(ql_log_warn, vha, 0x7063,
427 "HBA not online, failing NVRAM update.\n");
428 rval = -EAGAIN;
429 goto out;
430 }
431
432 ql_dbg(ql_dbg_user, vha, 0x7064,
433 "Reading flash region -- 0x%x/0x%x.\n",
434 ha->optrom_region_start, ha->optrom_region_size);
435
436 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
437 ha->optrom_region_start, ha->optrom_region_size);
438 break;
439 case 2:
440 if (ha->optrom_state != QLA_SWAITING) {
441 rval = -EINVAL;
442 goto out;
443 }
444
445 /*
446 * We need to be more restrictive on which FLASH regions are
447 * allowed to be updated via user-space. Regions accessible
448 * via this method include:
449 *
450 * ISP21xx/ISP22xx/ISP23xx type boards:
451 *
452 * 0x000000 -> 0x020000 -- Boot code.
453 *
454 * ISP2322/ISP24xx type boards:
455 *
456 * 0x000000 -> 0x07ffff -- Boot code.
457 * 0x080000 -> 0x0fffff -- Firmware.
458 *
459 * ISP25xx type boards:
460 *
461 * 0x000000 -> 0x07ffff -- Boot code.
462 * 0x080000 -> 0x0fffff -- Firmware.
463 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
464 *
465 * > ISP25xx type boards:
466 *
467 * None -- should go through BSG.
468 */
469 valid = 0;
470 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
471 valid = 1;
472 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
473 valid = 1;
474 if (!valid) {
475 ql_log(ql_log_warn, vha, 0x7065,
476 "Invalid start region 0x%x/0x%x.\n", start, size);
477 rval = -EINVAL;
478 goto out;
479 }
480
481 ha->optrom_region_start = start;
482 ha->optrom_region_size = size;
483
484 ha->optrom_state = QLA_SWRITING;
485 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
486 if (ha->optrom_buffer == NULL) {
487 ql_log(ql_log_warn, vha, 0x7066,
488 "Unable to allocate memory for optrom update "
489 "(%x)\n", ha->optrom_region_size);
490
491 ha->optrom_state = QLA_SWAITING;
492 rval = -ENOMEM;
493 goto out;
494 }
495
496 ql_dbg(ql_dbg_user, vha, 0x7067,
497 "Staging flash region write -- 0x%x/0x%x.\n",
498 ha->optrom_region_start, ha->optrom_region_size);
499
500 break;
501 case 3:
502 if (ha->optrom_state != QLA_SWRITING) {
503 rval = -EINVAL;
504 goto out;
505 }
506
507 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
508 ql_log(ql_log_warn, vha, 0x7068,
509 "HBA not online, failing flash update.\n");
510 rval = -EAGAIN;
511 goto out;
512 }
513
514 ql_dbg(ql_dbg_user, vha, 0x7069,
515 "Writing flash region -- 0x%x/0x%x.\n",
516 ha->optrom_region_start, ha->optrom_region_size);
517
518 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
519 ha->optrom_region_start, ha->optrom_region_size);
520 if (rval)
521 rval = -EIO;
522 break;
523 default:
524 rval = -EINVAL;
525 }
526
527 out:
528 mutex_unlock(&ha->optrom_mutex);
529 return rval;
530 }
531
532 static struct bin_attribute sysfs_optrom_ctl_attr = {
533 .attr = {
534 .name = "optrom_ctl",
535 .mode = S_IWUSR,
536 },
537 .size = 0,
538 .write = qla2x00_sysfs_write_optrom_ctl,
539 };
540
541 static ssize_t
542 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
543 struct bin_attribute *bin_attr,
544 char *buf, loff_t off, size_t count)
545 {
546 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
547 struct device, kobj)));
548 struct qla_hw_data *ha = vha->hw;
549 uint32_t faddr;
550 struct active_regions active_regions = { };
551
552 if (unlikely(pci_channel_offline(ha->pdev)))
553 return -EAGAIN;
554
555 if (!capable(CAP_SYS_ADMIN))
556 return -EINVAL;
557
558 if (!IS_NOCACHE_VPD_TYPE(ha))
559 goto skip;
560
561 faddr = ha->flt_region_vpd << 2;
562
563 if (IS_QLA28XX(ha)) {
564 qla28xx_get_aux_images(vha, &active_regions);
565 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
566 faddr = ha->flt_region_vpd_sec << 2;
567
568 ql_dbg(ql_dbg_init, vha, 0x7070,
569 "Loading %s nvram image.\n",
570 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
571 "primary" : "secondary");
572 }
573
574 mutex_lock(&ha->optrom_mutex);
575 if (qla2x00_chip_is_down(vha)) {
576 mutex_unlock(&ha->optrom_mutex);
577 return -EAGAIN;
578 }
579
580 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
581 mutex_unlock(&ha->optrom_mutex);
582
584 skip:
585 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
586 }
587
588 static ssize_t
589 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
590 struct bin_attribute *bin_attr,
591 char *buf, loff_t off, size_t count)
592 {
593 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
594 struct device, kobj)));
595 struct qla_hw_data *ha = vha->hw;
596 uint8_t *tmp_data;
597
598 if (unlikely(pci_channel_offline(ha->pdev)))
599 return 0;
600
601 if (qla2x00_chip_is_down(vha))
602 return 0;
603
604 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
605 !ha->isp_ops->write_nvram)
606 return 0;
607
608 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
609 ql_log(ql_log_warn, vha, 0x706a,
610 "HBA not online, failing VPD update.\n");
611 return -EAGAIN;
612 }
613
614 mutex_lock(&ha->optrom_mutex);
615 if (qla2x00_chip_is_down(vha)) {
616 mutex_unlock(&ha->optrom_mutex);
617 return -EAGAIN;
618 }
619
620 /* Write NVRAM. */
621 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
622 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
623
624 /* Update flash version information for 4Gb & above. */
625 if (!IS_FWI2_CAPABLE(ha)) {
626 mutex_unlock(&ha->optrom_mutex);
627 return -EINVAL;
628 }
629
630 tmp_data = vmalloc(256);
631 if (!tmp_data) {
632 mutex_unlock(&ha->optrom_mutex);
633 ql_log(ql_log_warn, vha, 0x706b,
634 "Unable to allocate memory for VPD information update.\n");
635 return -ENOMEM;
636 }
637 ha->isp_ops->get_flash_version(vha, tmp_data);
638 vfree(tmp_data);
639
640 mutex_unlock(&ha->optrom_mutex);
641
642 return count;
643 }
644
645 static struct bin_attribute sysfs_vpd_attr = {
646 .attr = {
647 .name = "vpd",
648 .mode = S_IRUSR | S_IWUSR,
649 },
650 .size = 0,
651 .read = qla2x00_sysfs_read_vpd,
652 .write = qla2x00_sysfs_write_vpd,
653 };
654
655 static ssize_t
656 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
657 struct bin_attribute *bin_attr,
658 char *buf, loff_t off, size_t count)
659 {
660 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
661 struct device, kobj)));
662 int rval;
663
664 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
665 return 0;
666
667 mutex_lock(&vha->hw->optrom_mutex);
668 if (qla2x00_chip_is_down(vha)) {
669 mutex_unlock(&vha->hw->optrom_mutex);
670 return 0;
671 }
672
673 rval = qla2x00_read_sfp_dev(vha, buf, count);
674 mutex_unlock(&vha->hw->optrom_mutex);
675
676 if (rval)
677 return -EIO;
678
679 return count;
680 }
681
682 static struct bin_attribute sysfs_sfp_attr = {
683 .attr = {
684 .name = "sfp",
685 .mode = S_IRUSR | S_IWUSR,
686 },
687 .size = SFP_DEV_SIZE,
688 .read = qla2x00_sysfs_read_sfp,
689 };
690
691 static ssize_t
692 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
693 struct bin_attribute *bin_attr,
694 char *buf, loff_t off, size_t count)
695 {
696 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
697 struct device, kobj)));
698 struct qla_hw_data *ha = vha->hw;
699 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
700 int type;
701 uint32_t idc_control;
702 uint8_t *tmp_data = NULL;
703
704 if (off != 0)
705 return -EINVAL;
706
707 type = simple_strtol(buf, NULL, 10);
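/*
 * Reset commands:
 *   0x2025c - ISP reset
 *   0x2025d - MPI reset (ISP81xx/83xx/27xx/28xx)
 *   0x2025e - FCoE context reset (P3P base port only)
 *   0x2025f - disable reset via IDC control (ISP8031)
 *   0x20260 - enable reset via IDC control (ISP8031)
 *   0x20261 - refresh cached flash version info without a reset
 */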
708 switch (type) {
709 case 0x2025c:
710 ql_log(ql_log_info, vha, 0x706e,
711 "Issuing ISP reset.\n");
712
713 scsi_block_requests(vha->host);
714 if (IS_QLA82XX(ha)) {
715 ha->flags.isp82xx_no_md_cap = 1;
716 qla82xx_idc_lock(ha);
717 qla82xx_set_reset_owner(vha);
718 qla82xx_idc_unlock(ha);
719 } else if (IS_QLA8044(ha)) {
720 qla8044_idc_lock(ha);
721 idc_control = qla8044_rd_reg(ha,
722 QLA8044_IDC_DRV_CTRL);
723 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
724 (idc_control | GRACEFUL_RESET_BIT1));
725 qla82xx_set_reset_owner(vha);
726 qla8044_idc_unlock(ha);
727 } else {
728 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
729 qla2xxx_wake_dpc(vha);
730 }
731 qla2x00_wait_for_chip_reset(vha);
732 scsi_unblock_requests(vha->host);
733 break;
734 case 0x2025d:
735 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
736 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
737 return -EPERM;
738
739 ql_log(ql_log_info, vha, 0x706f,
740 "Issuing MPI reset.\n");
741
742 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
743 uint32_t idc_control;
744
745 qla83xx_idc_lock(vha, 0);
746 __qla83xx_get_idc_control(vha, &idc_control);
747 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
748 __qla83xx_set_idc_control(vha, idc_control);
749 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
750 QLA8XXX_DEV_NEED_RESET);
751 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
752 qla83xx_idc_unlock(vha, 0);
753 break;
754 } else {
755 /* Make sure FC side is not in reset */
756 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
757 QLA_SUCCESS);
758
759 /* Issue MPI reset */
760 scsi_block_requests(vha->host);
761 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
762 ql_log(ql_log_warn, vha, 0x7070,
763 "MPI reset failed.\n");
764 scsi_unblock_requests(vha->host);
765 break;
766 }
767 break;
768 case 0x2025e:
769 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
770 ql_log(ql_log_info, vha, 0x7071,
771 "FCoE ctx reset not supported.\n");
772 return -EPERM;
773 }
774
775 ql_log(ql_log_info, vha, 0x7072,
776 "Issuing FCoE ctx reset.\n");
777 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
778 qla2xxx_wake_dpc(vha);
779 qla2x00_wait_for_fcoe_ctx_reset(vha);
780 break;
781 case 0x2025f:
782 if (!IS_QLA8031(ha))
783 return -EPERM;
784 ql_log(ql_log_info, vha, 0x70bc,
785 "Disabling Reset by IDC control\n");
786 qla83xx_idc_lock(vha, 0);
787 __qla83xx_get_idc_control(vha, &idc_control);
788 idc_control |= QLA83XX_IDC_RESET_DISABLED;
789 __qla83xx_set_idc_control(vha, idc_control);
790 qla83xx_idc_unlock(vha, 0);
791 break;
792 case 0x20260:
793 if (!IS_QLA8031(ha))
794 return -EPERM;
795 ql_log(ql_log_info, vha, 0x70bd,
796 "Enabling Reset by IDC control\n");
797 qla83xx_idc_lock(vha, 0);
798 __qla83xx_get_idc_control(vha, &idc_control);
799 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
800 __qla83xx_set_idc_control(vha, idc_control);
801 qla83xx_idc_unlock(vha, 0);
802 break;
803 case 0x20261:
804 ql_dbg(ql_dbg_user, vha, 0x70e0,
805 "Updating cache versions without reset ");
806
807 tmp_data = vmalloc(256);
808 if (!tmp_data) {
809 ql_log(ql_log_warn, vha, 0x70e1,
810 "Unable to allocate memory for VPD information update.\n");
811 return -ENOMEM;
812 }
813 ha->isp_ops->get_flash_version(vha, tmp_data);
814 vfree(tmp_data);
815 break;
816 }
817 return count;
818 }
819
820 static struct bin_attribute sysfs_reset_attr = {
821 .attr = {
822 .name = "reset",
823 .mode = S_IWUSR,
824 },
825 .size = 0,
826 .write = qla2x00_sysfs_write_reset,
827 };
828
829 static ssize_t
830 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
831 struct bin_attribute *bin_attr,
832 char *buf, loff_t off, size_t count)
833 {
834 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
835 struct device, kobj)));
836 int type;
837 port_id_t did;
838
839 if (!capable(CAP_SYS_ADMIN))
840 return 0;
841
842 if (unlikely(pci_channel_offline(vha->hw->pdev)))
843 return 0;
844
845 if (qla2x00_chip_is_down(vha))
846 return 0;
847
848 type = simple_strtol(buf, NULL, 10);
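/*
 * The written value encodes the 24-bit FC port ID to send the LOGO to:
 * bits 23-16 domain, 15-8 area, 7-0 AL_PA.
 */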
849
850 did.b.domain = (type & 0x00ff0000) >> 16;
851 did.b.area = (type & 0x0000ff00) >> 8;
852 did.b.al_pa = (type & 0x000000ff);
853
854 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
855 did.b.domain, did.b.area, did.b.al_pa);
856
857 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
858
859 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
860 return count;
861 }
862
863 static struct bin_attribute sysfs_issue_logo_attr = {
864 .attr = {
865 .name = "issue_logo",
866 .mode = S_IWUSR,
867 },
868 .size = 0,
869 .write = qla2x00_issue_logo,
870 };
871
872 static ssize_t
873 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
874 struct bin_attribute *bin_attr,
875 char *buf, loff_t off, size_t count)
876 {
877 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
878 struct device, kobj)));
879 struct qla_hw_data *ha = vha->hw;
880 int rval;
881 uint16_t actual_size;
882
883 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
884 return 0;
885
886 if (unlikely(pci_channel_offline(ha->pdev)))
887 return 0;
888 mutex_lock(&vha->hw->optrom_mutex);
889 if (qla2x00_chip_is_down(vha)) {
890 mutex_unlock(&vha->hw->optrom_mutex);
891 return 0;
892 }
893
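/* The XGMAC statistics DMA buffer is allocated once and then reused. */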
894 if (ha->xgmac_data)
895 goto do_read;
896
897 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
898 &ha->xgmac_data_dma, GFP_KERNEL);
899 if (!ha->xgmac_data) {
900 mutex_unlock(&vha->hw->optrom_mutex);
901 ql_log(ql_log_warn, vha, 0x7076,
902 "Unable to allocate memory for XGMAC read-data.\n");
903 return 0;
904 }
905
906 do_read:
907 actual_size = 0;
908 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
909
910 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
911 XGMAC_DATA_SIZE, &actual_size);
912
913 mutex_unlock(&vha->hw->optrom_mutex);
914 if (rval != QLA_SUCCESS) {
915 ql_log(ql_log_warn, vha, 0x7077,
916 "Unable to read XGMAC data (%x).\n", rval);
917 count = 0;
918 }
919
920 count = actual_size > count ? count : actual_size;
921 memcpy(buf, ha->xgmac_data, count);
922
923 return count;
924 }
925
926 static struct bin_attribute sysfs_xgmac_stats_attr = {
927 .attr = {
928 .name = "xgmac_stats",
929 .mode = S_IRUSR,
930 },
931 .size = 0,
932 .read = qla2x00_sysfs_read_xgmac_stats,
933 };
934
935 static ssize_t
936 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
937 struct bin_attribute *bin_attr,
938 char *buf, loff_t off, size_t count)
939 {
940 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
941 struct device, kobj)));
942 struct qla_hw_data *ha = vha->hw;
943 int rval;
944
945 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
946 return 0;
947
948 mutex_lock(&vha->hw->optrom_mutex);
949 if (qla2x00_chip_is_down(vha)) {
950 mutex_unlock(&vha->hw->optrom_mutex);
951 return 0;
952 }
953 if (ha->dcbx_tlv)
954 goto do_read;
955
956 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
957 &ha->dcbx_tlv_dma, GFP_KERNEL);
958 if (!ha->dcbx_tlv) {
959 mutex_unlock(&vha->hw->optrom_mutex);
960 ql_log(ql_log_warn, vha, 0x7078,
961 "Unable to allocate memory for DCBX TLV read-data.\n");
962 return -ENOMEM;
963 }
964
965 do_read:
966 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
967
968 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
969 DCBX_TLV_DATA_SIZE);
970
971 mutex_unlock(&vha->hw->optrom_mutex);
972
973 if (rval != QLA_SUCCESS) {
974 ql_log(ql_log_warn, vha, 0x7079,
975 "Unable to read DCBX TLV (%x).\n", rval);
976 return -EIO;
977 }
978
979 memcpy(buf, ha->dcbx_tlv, count);
980
981 return count;
982 }
983
984 static struct bin_attribute sysfs_dcbx_tlv_attr = {
985 .attr = {
986 .name = "dcbx_tlv",
987 .mode = S_IRUSR,
988 },
989 .size = 0,
990 .read = qla2x00_sysfs_read_dcbx_tlv,
991 };
992
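/*
 * The type field gates attribute creation in qla2x00_alloc_sysfs_attr():
 * any non-zero type requires an FWI2-capable ISP, type 2 additionally
 * requires ISP25xx and type 3 additionally requires a CNA-capable part.
 */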
993 static struct sysfs_entry {
994 char *name;
995 struct bin_attribute *attr;
996 int type;
997 } bin_file_entries[] = {
998 { "fw_dump", &sysfs_fw_dump_attr, },
999 { "nvram", &sysfs_nvram_attr, },
1000 { "optrom", &sysfs_optrom_attr, },
1001 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
1002 { "vpd", &sysfs_vpd_attr, 1 },
1003 { "sfp", &sysfs_sfp_attr, 1 },
1004 { "reset", &sysfs_reset_attr, },
1005 { "issue_logo", &sysfs_issue_logo_attr, },
1006 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
1007 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
1008 { NULL },
1009 };
1010
1011 void
1012 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
1013 {
1014 struct Scsi_Host *host = vha->host;
1015 struct sysfs_entry *iter;
1016 int ret;
1017
1018 for (iter = bin_file_entries; iter->name; iter++) {
1019 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
1020 continue;
1021 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
1022 continue;
1023 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
1024 continue;
1025
1026 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
1027 iter->attr);
1028 if (ret)
1029 ql_log(ql_log_warn, vha, 0x00f3,
1030 "Unable to create sysfs %s binary attribute (%d).\n",
1031 iter->name, ret);
1032 else
1033 ql_dbg(ql_dbg_init, vha, 0x00f4,
1034 "Successfully created sysfs %s binary attribute.\n",
1035 iter->name);
1036 }
1037 }
1038
1039 void
1040 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1041 {
1042 struct Scsi_Host *host = vha->host;
1043 struct sysfs_entry *iter;
1044 struct qla_hw_data *ha = vha->hw;
1045
1046 for (iter = bin_file_entries; iter->name; iter++) {
1047 if (iter->type && !IS_FWI2_CAPABLE(ha))
1048 continue;
1049 if (iter->type == 2 && !IS_QLA25XX(ha))
1050 continue;
1051 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1052 continue;
1053 if (iter->type == 0x27 &&
1054 (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
1055 continue;
1056
1057 sysfs_remove_bin_file(&host->shost_gendev.kobj,
1058 iter->attr);
1059 }
1060
1061 if (stop_beacon && ha->beacon_blink_led == 1)
1062 ha->isp_ops->beacon_off(vha);
1063 }
1064
1065 /* Scsi_Host attributes. */
1066
1067 static ssize_t
1068 qla2x00_driver_version_show(struct device *dev,
1069 struct device_attribute *attr, char *buf)
1070 {
1071 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1072 }
1073
1074 static ssize_t
1075 qla2x00_fw_version_show(struct device *dev,
1076 struct device_attribute *attr, char *buf)
1077 {
1078 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1079 struct qla_hw_data *ha = vha->hw;
1080 char fw_str[128];
1081
1082 return scnprintf(buf, PAGE_SIZE, "%s\n",
1083 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1084 }
1085
1086 static ssize_t
1087 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1088 char *buf)
1089 {
1090 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1091 struct qla_hw_data *ha = vha->hw;
1092 uint32_t sn;
1093
1094 if (IS_QLAFX00(vha->hw)) {
1095 return scnprintf(buf, PAGE_SIZE, "%s\n",
1096 vha->hw->mr.serial_num);
1097 } else if (IS_FWI2_CAPABLE(ha)) {
1098 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1099 return strlen(strcat(buf, "\n"));
1100 }
1101
1102 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1103 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1104 sn % 100000);
1105 }
1106
1107 static ssize_t
1108 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1109 char *buf)
1110 {
1111 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1112
1113 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1114 }
1115
1116 static ssize_t
1117 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1118 char *buf)
1119 {
1120 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1121 struct qla_hw_data *ha = vha->hw;
1122
1123 if (IS_QLAFX00(vha->hw))
1124 return scnprintf(buf, PAGE_SIZE, "%s\n",
1125 vha->hw->mr.hw_version);
1126
1127 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1128 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1129 ha->product_id[3]);
1130 }
1131
1132 static ssize_t
1133 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1134 char *buf)
1135 {
1136 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1137
1138 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1139 }
1140
1141 static ssize_t
1142 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1143 char *buf)
1144 {
1145 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1146
1147 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1148 }
1149
1150 static ssize_t
1151 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1152 char *buf)
1153 {
1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1155 char pci_info[30];
1156
1157 return scnprintf(buf, PAGE_SIZE, "%s\n",
1158 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1159 sizeof(pci_info)));
1160 }
1161
1162 static ssize_t
1163 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1164 char *buf)
1165 {
1166 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1167 struct qla_hw_data *ha = vha->hw;
1168 int len = 0;
1169
1170 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1171 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1172 vha->device_flags & DFLG_NO_CABLE)
1173 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1174 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1175 qla2x00_chip_is_down(vha))
1176 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1177 else {
1178 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1179
1180 switch (ha->current_topology) {
1181 case ISP_CFG_NL:
1182 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1183 break;
1184 case ISP_CFG_FL:
1185 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1186 break;
1187 case ISP_CFG_N:
1188 len += scnprintf(buf + len, PAGE_SIZE-len,
1189 "N_Port to N_Port\n");
1190 break;
1191 case ISP_CFG_F:
1192 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1193 break;
1194 default:
1195 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1196 break;
1197 }
1198 }
1199 return len;
1200 }
1201
1202 static ssize_t
1203 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1204 char *buf)
1205 {
1206 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1207 int len = 0;
1208
1209 switch (vha->hw->zio_mode) {
1210 case QLA_ZIO_MODE_6:
1211 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1212 break;
1213 case QLA_ZIO_DISABLED:
1214 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1215 break;
1216 }
1217 return len;
1218 }
1219
1220 static ssize_t
1221 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1222 const char *buf, size_t count)
1223 {
1224 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1225 struct qla_hw_data *ha = vha->hw;
1226 int val = 0;
1227 uint16_t zio_mode;
1228
1229 if (!IS_ZIO_SUPPORTED(ha))
1230 return -ENOTSUPP;
1231
1232 if (sscanf(buf, "%d", &val) != 1)
1233 return -EINVAL;
1234
1235 if (val)
1236 zio_mode = QLA_ZIO_MODE_6;
1237 else
1238 zio_mode = QLA_ZIO_DISABLED;
1239
1240 /* Update per-hba values and queue a reset. */
1241 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1242 ha->zio_mode = zio_mode;
1243 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1244 }
1245 return strlen(buf);
1246 }
1247
1248 static ssize_t
1249 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1250 char *buf)
1251 {
1252 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1253
1254 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1255 }
1256
1257 static ssize_t
1258 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1259 const char *buf, size_t count)
1260 {
1261 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1262 int val = 0;
1263 uint16_t zio_timer;
1264
1265 if (sscanf(buf, "%d", &val) != 1)
1266 return -EINVAL;
1267 if (val > 25500 || val < 100)
1268 return -ERANGE;
1269
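/* The ZIO delay timer is programmed in 100-microsecond increments. */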
1270 zio_timer = (uint16_t)(val / 100);
1271 vha->hw->zio_timer = zio_timer;
1272
1273 return strlen(buf);
1274 }
1275
1276 static ssize_t
1277 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1278 char *buf)
1279 {
1280 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1281
1282 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1283 vha->hw->last_zio_threshold);
1284 }
1285
1286 static ssize_t
1287 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1288 const char *buf, size_t count)
1289 {
1290 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1291 int val = 0;
1292
1293 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1294 return -EINVAL;
1295 if (sscanf(buf, "%d", &val) != 1)
1296 return -EINVAL;
1297 if (val < 0 || val > 256)
1298 return -ERANGE;
1299
1300 atomic_set(&vha->hw->zio_threshold, val);
1301 return strlen(buf);
1302 }
1303
1304 static ssize_t
1305 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1306 char *buf)
1307 {
1308 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1309 int len = 0;
1310
1311 if (vha->hw->beacon_blink_led)
1312 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1313 else
1314 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1315 return len;
1316 }
1317
1318 static ssize_t
1319 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1320 const char *buf, size_t count)
1321 {
1322 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1323 struct qla_hw_data *ha = vha->hw;
1324 int val = 0;
1325 int rval;
1326
1327 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1328 return -EPERM;
1329
1330 if (sscanf(buf, "%d", &val) != 1)
1331 return -EINVAL;
1332
1333 mutex_lock(&vha->hw->optrom_mutex);
1334 if (qla2x00_chip_is_down(vha)) {
1335 mutex_unlock(&vha->hw->optrom_mutex);
1336 ql_log(ql_log_warn, vha, 0x707a,
1337 "Abort ISP active -- ignoring beacon request.\n");
1338 return -EBUSY;
1339 }
1340
1341 if (val)
1342 rval = ha->isp_ops->beacon_on(vha);
1343 else
1344 rval = ha->isp_ops->beacon_off(vha);
1345
1346 if (rval != QLA_SUCCESS)
1347 count = 0;
1348
1349 mutex_unlock(&vha->hw->optrom_mutex);
1350
1351 return count;
1352 }
1353
1354 static ssize_t
1355 qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
1356 char *buf)
1357 {
1358 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1359 struct qla_hw_data *ha = vha->hw;
1360 uint16_t led[3] = { 0 };
1361
1362 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1363 return -EPERM;
1364
1365 if (ql26xx_led_config(vha, 0, led))
1366 return scnprintf(buf, PAGE_SIZE, "\n");
1367
1368 return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
1369 led[0], led[1], led[2]);
1370 }
1371
1372 static ssize_t
1373 qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
1374 const char *buf, size_t count)
1375 {
1376 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1377 struct qla_hw_data *ha = vha->hw;
1378 uint16_t options = BIT_0;
1379 uint16_t led[3] = { 0 };
1380 uint16_t word[4];
1381 int n;
1382
1383 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1384 return -EPERM;
1385
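/*
 * Accepted formats: "3 <led0> <led1> <led2>" programs all three LED
 * registers at once; "<index> <value>" with index 0..2 programs a
 * single LED register.
 */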
1386 n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
1387 if (n == 4) {
1388 if (word[0] == 3) {
1389 options |= BIT_3|BIT_2|BIT_1;
1390 led[0] = word[1];
1391 led[1] = word[2];
1392 led[2] = word[3];
1393 goto write;
1394 }
1395 return -EINVAL;
1396 }
1397
1398 if (n == 2) {
1399 /* check led index */
1400 if (word[0] == 0) {
1401 options |= BIT_2;
1402 led[0] = word[1];
1403 goto write;
1404 }
1405 if (word[0] == 1) {
1406 options |= BIT_3;
1407 led[1] = word[1];
1408 goto write;
1409 }
1410 if (word[0] == 2) {
1411 options |= BIT_1;
1412 led[2] = word[1];
1413 goto write;
1414 }
1415 return -EINVAL;
1416 }
1417
1418 return -EINVAL;
1419
1420 write:
1421 if (ql26xx_led_config(vha, options, led))
1422 return -EFAULT;
1423
1424 return count;
1425 }
1426
1427 static ssize_t
1428 qla2x00_optrom_bios_version_show(struct device *dev,
1429 struct device_attribute *attr, char *buf)
1430 {
1431 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1432 struct qla_hw_data *ha = vha->hw;
1433
1434 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1435 ha->bios_revision[0]);
1436 }
1437
1438 static ssize_t
1439 qla2x00_optrom_efi_version_show(struct device *dev,
1440 struct device_attribute *attr, char *buf)
1441 {
1442 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1443 struct qla_hw_data *ha = vha->hw;
1444
1445 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1446 ha->efi_revision[0]);
1447 }
1448
1449 static ssize_t
1450 qla2x00_optrom_fcode_version_show(struct device *dev,
1451 struct device_attribute *attr, char *buf)
1452 {
1453 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1454 struct qla_hw_data *ha = vha->hw;
1455
1456 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1457 ha->fcode_revision[0]);
1458 }
1459
1460 static ssize_t
1461 qla2x00_optrom_fw_version_show(struct device *dev,
1462 struct device_attribute *attr, char *buf)
1463 {
1464 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1465 struct qla_hw_data *ha = vha->hw;
1466
1467 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1468 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1469 ha->fw_revision[3]);
1470 }
1471
1472 static ssize_t
1473 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1474 struct device_attribute *attr, char *buf)
1475 {
1476 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1477 struct qla_hw_data *ha = vha->hw;
1478
1479 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1480 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1481 return scnprintf(buf, PAGE_SIZE, "\n");
1482
1483 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1484 ha->gold_fw_version[0], ha->gold_fw_version[1],
1485 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1486 }
1487
1488 static ssize_t
1489 qla2x00_total_isp_aborts_show(struct device *dev,
1490 struct device_attribute *attr, char *buf)
1491 {
1492 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1493
1494 return scnprintf(buf, PAGE_SIZE, "%d\n",
1495 vha->qla_stats.total_isp_aborts);
1496 }
1497
1498 static ssize_t
1499 qla24xx_84xx_fw_version_show(struct device *dev,
1500 struct device_attribute *attr, char *buf)
1501 {
1502 int rval = QLA_SUCCESS;
1503 uint16_t status[2] = { 0 };
1504 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1505 struct qla_hw_data *ha = vha->hw;
1506
1507 if (!IS_QLA84XX(ha))
1508 return scnprintf(buf, PAGE_SIZE, "\n");
1509
1510 if (!ha->cs84xx->op_fw_version) {
1511 rval = qla84xx_verify_chip(vha, status);
1512
1513 if (!rval && !status[0])
1514 return scnprintf(buf, PAGE_SIZE, "%u\n",
1515 (uint32_t)ha->cs84xx->op_fw_version);
1516 }
1517
1518 return scnprintf(buf, PAGE_SIZE, "\n");
1519 }
1520
1521 static ssize_t
1522 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1523 char *buf)
1524 {
1525 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1526 struct qla_hw_data *ha = vha->hw;
1527
1528 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1529 return scnprintf(buf, PAGE_SIZE, "\n");
1530
1531 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1532 ha->serdes_version[0], ha->serdes_version[1],
1533 ha->serdes_version[2]);
1534 }
1535
1536 static ssize_t
1537 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1538 char *buf)
1539 {
1540 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1541 struct qla_hw_data *ha = vha->hw;
1542
1543 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1544 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1545 return scnprintf(buf, PAGE_SIZE, "\n");
1546
1547 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1548 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1549 ha->mpi_capabilities);
1550 }
1551
1552 static ssize_t
1553 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1554 char *buf)
1555 {
1556 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1557 struct qla_hw_data *ha = vha->hw;
1558
1559 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1560 return scnprintf(buf, PAGE_SIZE, "\n");
1561
1562 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1563 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1564 }
1565
1566 static ssize_t
1567 qla2x00_flash_block_size_show(struct device *dev,
1568 struct device_attribute *attr, char *buf)
1569 {
1570 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1571 struct qla_hw_data *ha = vha->hw;
1572
1573 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1574 }
1575
1576 static ssize_t
1577 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1578 char *buf)
1579 {
1580 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1581
1582 if (!IS_CNA_CAPABLE(vha->hw))
1583 return scnprintf(buf, PAGE_SIZE, "\n");
1584
1585 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1586 }
1587
1588 static ssize_t
1589 qla2x00_vn_port_mac_address_show(struct device *dev,
1590 struct device_attribute *attr, char *buf)
1591 {
1592 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1593
1594 if (!IS_CNA_CAPABLE(vha->hw))
1595 return scnprintf(buf, PAGE_SIZE, "\n");
1596
1597 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1598 }
1599
1600 static ssize_t
1601 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1605
1606 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1607 }
1608
1609 static ssize_t
1610 qla2x00_thermal_temp_show(struct device *dev,
1611 struct device_attribute *attr, char *buf)
1612 {
1613 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1614 uint16_t temp = 0;
1615 int rc;
1616
1617 mutex_lock(&vha->hw->optrom_mutex);
1618 if (qla2x00_chip_is_down(vha)) {
1619 mutex_unlock(&vha->hw->optrom_mutex);
1620 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1621 goto done;
1622 }
1623
1624 if (vha->hw->flags.eeh_busy) {
1625 mutex_unlock(&vha->hw->optrom_mutex);
1626 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1627 goto done;
1628 }
1629
1630 rc = qla2x00_get_thermal_temp(vha, &temp);
1631 mutex_unlock(&vha->hw->optrom_mutex);
1632 if (rc == QLA_SUCCESS)
1633 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1634
1635 done:
1636 return scnprintf(buf, PAGE_SIZE, "\n");
1637 }
1638
1639 static ssize_t
1640 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1641 char *buf)
1642 {
1643 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1644 int rval = QLA_FUNCTION_FAILED;
1645 uint16_t state[6];
1646 uint32_t pstate;
1647
1648 if (IS_QLAFX00(vha->hw)) {
1649 pstate = qlafx00_fw_state_show(dev, attr, buf);
1650 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1651 }
1652
1653 mutex_lock(&vha->hw->optrom_mutex);
1654 if (qla2x00_chip_is_down(vha)) {
1655 mutex_unlock(&vha->hw->optrom_mutex);
1656 ql_log(ql_log_warn, vha, 0x707c,
1657 "ISP reset active.\n");
1658 goto out;
1659 } else if (vha->hw->flags.eeh_busy) {
1660 mutex_unlock(&vha->hw->optrom_mutex);
1661 goto out;
1662 }
1663
1664 rval = qla2x00_get_firmware_state(vha, state);
1665 mutex_unlock(&vha->hw->optrom_mutex);
1666 out:
1667 if (rval != QLA_SUCCESS) {
1668 memset(state, -1, sizeof(state));
1669 rval = qla2x00_get_firmware_state(vha, state);
1670 }
1671
1672 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1673 state[0], state[1], state[2], state[3], state[4], state[5]);
1674 }
1675
1676 static ssize_t
1677 qla2x00_diag_requests_show(struct device *dev,
1678 struct device_attribute *attr, char *buf)
1679 {
1680 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1681
1682 if (!IS_BIDI_CAPABLE(vha->hw))
1683 return scnprintf(buf, PAGE_SIZE, "\n");
1684
1685 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1686 }
1687
1688 static ssize_t
1689 qla2x00_diag_megabytes_show(struct device *dev,
1690 struct device_attribute *attr, char *buf)
1691 {
1692 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1693
1694 if (!IS_BIDI_CAPABLE(vha->hw))
1695 return scnprintf(buf, PAGE_SIZE, "\n");
1696
1697 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1698 vha->bidi_stats.transfer_bytes >> 20);
1699 }
1700
1701 static ssize_t
1702 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1703 char *buf)
1704 {
1705 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1706 struct qla_hw_data *ha = vha->hw;
1707 uint32_t size;
1708
1709 if (!ha->fw_dumped)
1710 size = 0;
1711 else if (IS_P3P_TYPE(ha))
1712 size = ha->md_template_size + ha->md_dump_size;
1713 else
1714 size = ha->fw_dump_len;
1715
1716 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1717 }
1718
1719 static ssize_t
1720 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1721 struct device_attribute *attr, char *buf)
1722 {
1723 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1724
1725 if (!IS_P3P_TYPE(vha->hw))
1726 return scnprintf(buf, PAGE_SIZE, "\n");
1727 else
1728 return scnprintf(buf, PAGE_SIZE, "%s\n",
1729 vha->hw->allow_cna_fw_dump ? "true" : "false");
1730 }
1731
1732 static ssize_t
1733 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1734 struct device_attribute *attr, const char *buf, size_t count)
1735 {
1736 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1737 int val = 0;
1738
1739 if (!IS_P3P_TYPE(vha->hw))
1740 return -EINVAL;
1741
1742 if (sscanf(buf, "%d", &val) != 1)
1743 return -EINVAL;
1744
1745 vha->hw->allow_cna_fw_dump = val != 0;
1746
1747 return strlen(buf);
1748 }
1749
1750 static ssize_t
1751 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1752 char *buf)
1753 {
1754 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1755 struct qla_hw_data *ha = vha->hw;
1756
1757 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1758 return scnprintf(buf, PAGE_SIZE, "\n");
1759
1760 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1761 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1762 }
1763
1764 static ssize_t
1765 qla2x00_min_supported_speed_show(struct device *dev,
1766 struct device_attribute *attr, char *buf)
1767 {
1768 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1769 struct qla_hw_data *ha = vha->hw;
1770
1771 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1772 return scnprintf(buf, PAGE_SIZE, "\n");
1773
1774 return scnprintf(buf, PAGE_SIZE, "%s\n",
1775 ha->min_supported_speed == 6 ? "64Gps" :
1776 ha->min_supported_speed == 5 ? "32Gps" :
1777 ha->min_supported_speed == 4 ? "16Gps" :
1778 ha->min_supported_speed == 3 ? "8Gps" :
1779 ha->min_supported_speed == 2 ? "4Gps" :
1780 ha->min_supported_speed != 0 ? "unknown" : "");
1781 }
1782
1783 static ssize_t
1784 qla2x00_max_supported_speed_show(struct device *dev,
1785 struct device_attribute *attr, char *buf)
1786 {
1787 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1788 struct qla_hw_data *ha = vha->hw;
1789
1790 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1791 return scnprintf(buf, PAGE_SIZE, "\n");
1792
1793 return scnprintf(buf, PAGE_SIZE, "%s\n",
1794 ha->max_supported_speed == 2 ? "64Gps" :
1795 ha->max_supported_speed == 1 ? "32Gps" :
1796 ha->max_supported_speed == 0 ? "16Gps" : "unknown");
1797 }
1798
1799 static ssize_t
1800 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1801 const char *buf, size_t count)
1802 {
1803 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1804 ulong type, speed;
1805 int oldspeed, rval;
1806 int mode = QLA_SET_DATA_RATE_LR;
1807 struct qla_hw_data *ha = vha->hw;
1808
1809 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1810 ql_log(ql_log_warn, vha, 0x70d8,
1811 "Speed setting not supported \n");
1812 return -EINVAL;
1813 }
1814
1815 rval = kstrtol(buf, 10, &type);
1816 if (rval)
1817 return rval;
1818 speed = type;
1819 if (type == 40 || type == 80 || type == 160 ||
1820 type == 320) {
1821 ql_dbg(ql_dbg_user, vha, 0x70d9,
1822 "Setting will be affected after a loss of sync\n");
1823 type = type/10;
1824 mode = QLA_SET_DATA_RATE_NOLR;
1825 }
1826
1827 oldspeed = ha->set_data_rate;
1828
1829 switch (type) {
1830 case 0:
1831 ha->set_data_rate = PORT_SPEED_AUTO;
1832 break;
1833 case 4:
1834 ha->set_data_rate = PORT_SPEED_4GB;
1835 break;
1836 case 8:
1837 ha->set_data_rate = PORT_SPEED_8GB;
1838 break;
1839 case 16:
1840 ha->set_data_rate = PORT_SPEED_16GB;
1841 break;
1842 case 32:
1843 ha->set_data_rate = PORT_SPEED_32GB;
1844 break;
1845 default:
1846 ql_log(ql_log_warn, vha, 0x1199,
1847 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1848 speed);
1849 ha->set_data_rate = PORT_SPEED_AUTO;
1850 }
1851
1852 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1853 return -EINVAL;
1854
1855 ql_log(ql_log_info, vha, 0x70da,
1856 "Setting speed to %lx Gbps \n", type);
1857
1858 rval = qla2x00_set_data_rate(vha, mode);
1859 if (rval != QLA_SUCCESS)
1860 return -EIO;
1861
1862 return strlen(buf);
1863 }
1864
1865 static const struct {
1866 u16 rate;
1867 char *str;
1868 } port_speed_str[] = {
1869 { PORT_SPEED_4GB, "4" },
1870 { PORT_SPEED_8GB, "8" },
1871 { PORT_SPEED_16GB, "16" },
1872 { PORT_SPEED_32GB, "32" },
1873 { PORT_SPEED_64GB, "64" },
1874 { PORT_SPEED_10GB, "10" },
1875 };
1876
1877 static ssize_t
1878 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1879 char *buf)
1880 {
1881 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1882 struct qla_hw_data *ha = vha->hw;
1883 ssize_t rval;
1884 u16 i;
1885 char *speed = "Unknown";
1886
1887 rval = qla2x00_get_data_rate(vha);
1888 if (rval != QLA_SUCCESS) {
1889 ql_log(ql_log_warn, vha, 0x70db,
1890 "Unable to get port speed rval:%zd\n", rval);
1891 return -EINVAL;
1892 }
1893
1894 for (i = 0; i < ARRAY_SIZE(port_speed_str); i++) {
1895 if (port_speed_str[i].rate != ha->link_data_rate)
1896 continue;
1897 speed = port_speed_str[i].str;
1898 break;
1899 }
1900
1901 return scnprintf(buf, PAGE_SIZE, "%s\n", speed);
1902 }
1903
1904 /* ----- */
1905
1906 static ssize_t
1907 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1908 {
1909 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1910 int len = 0;
1911
1912 len += scnprintf(buf + len, PAGE_SIZE-len,
1913 "Supported options: enabled | disabled | dual | exclusive\n");
1914
1915 /* --- */
1916 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1917
1918 switch (vha->qlini_mode) {
1919 case QLA2XXX_INI_MODE_EXCLUSIVE:
1920 len += scnprintf(buf + len, PAGE_SIZE-len,
1921 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1922 break;
1923 case QLA2XXX_INI_MODE_DISABLED:
1924 len += scnprintf(buf + len, PAGE_SIZE-len,
1925 QLA2XXX_INI_MODE_STR_DISABLED);
1926 break;
1927 case QLA2XXX_INI_MODE_ENABLED:
1928 len += scnprintf(buf + len, PAGE_SIZE-len,
1929 QLA2XXX_INI_MODE_STR_ENABLED);
1930 break;
1931 case QLA2XXX_INI_MODE_DUAL:
1932 len += scnprintf(buf + len, PAGE_SIZE-len,
1933 QLA2XXX_INI_MODE_STR_DUAL);
1934 break;
1935 }
1936 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1937
1938 return len;
1939 }
1940
1941 static char *mode_to_str[] = {
1942 "exclusive",
1943 "disabled",
1944 "enabled",
1945 "dual",
1946 };
1947
1948 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
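/*
 * Resolve a requested initiator-mode change (op) against the current
 * vha->qlini_mode. The nested switches below reduce the request to one of
 * four outcomes: MODE_CHANGE_ACCEPT (apply the new mode and exchange
 * counts, then schedule an ISP abort), MODE_CHANGE_NO_ACTION (record the
 * new values without resetting), TARGET_STILL_ACTIVE (reject while target
 * mode is running), or NO_ACTION. NEED_EXCH_OFFLOAD() flags exchange
 * counts large enough to require firmware exchange offload.
 */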
1949 static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1950 {
1951 enum {
1952 NO_ACTION,
1953 MODE_CHANGE_ACCEPT,
1954 MODE_CHANGE_NO_ACTION,
1955 TARGET_STILL_ACTIVE,
1956 };
1957 int action = NO_ACTION;
1958 int set_mode = 0;
1959 u8 eo_toggle = 0; /* exchange offload flipped */
1960
1961 switch (vha->qlini_mode) {
1962 case QLA2XXX_INI_MODE_DISABLED:
1963 switch (op) {
1964 case QLA2XXX_INI_MODE_DISABLED:
1965 if (qla_tgt_mode_enabled(vha)) {
1966 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1967 vha->hw->flags.exchoffld_enabled)
1968 eo_toggle = 1;
1969 if (((vha->ql2xexchoffld !=
1970 vha->u_ql2xexchoffld) &&
1971 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1972 eo_toggle) {
1973 /*
1974 * The number of exchanges to be offloaded
1975 * was tweaked or the offload option was
1976 * flipped
1977 */
1978 action = MODE_CHANGE_ACCEPT;
1979 } else {
1980 action = MODE_CHANGE_NO_ACTION;
1981 }
1982 } else {
1983 action = MODE_CHANGE_NO_ACTION;
1984 }
1985 break;
1986 case QLA2XXX_INI_MODE_EXCLUSIVE:
1987 if (qla_tgt_mode_enabled(vha)) {
1988 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1989 vha->hw->flags.exchoffld_enabled)
1990 eo_toggle = 1;
1991 if (((vha->ql2xexchoffld !=
1992 vha->u_ql2xexchoffld) &&
1993 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1994 eo_toggle) {
1995 /*
1996 * The number of exchanges to be offloaded
1997 * was tweaked or the offload option was
1998 * flipped
1999 */
2000 action = MODE_CHANGE_ACCEPT;
2001 } else {
2002 action = MODE_CHANGE_NO_ACTION;
2003 }
2004 } else {
2005 action = MODE_CHANGE_ACCEPT;
2006 }
2007 break;
2008 case QLA2XXX_INI_MODE_DUAL:
2009 action = MODE_CHANGE_ACCEPT;
2010 /* active_mode is target only, reset it to dual */
2011 if (qla_tgt_mode_enabled(vha)) {
2012 set_mode = 1;
2013 action = MODE_CHANGE_ACCEPT;
2014 } else {
2015 action = MODE_CHANGE_NO_ACTION;
2016 }
2017 break;
2018
2019 case QLA2XXX_INI_MODE_ENABLED:
2020 if (qla_tgt_mode_enabled(vha))
2021 action = TARGET_STILL_ACTIVE;
2022 else {
2023 action = MODE_CHANGE_ACCEPT;
2024 set_mode = 1;
2025 }
2026 break;
2027 }
2028 break;
2029
2030 case QLA2XXX_INI_MODE_EXCLUSIVE:
2031 switch (op) {
2032 case QLA2XXX_INI_MODE_EXCLUSIVE:
2033 if (qla_tgt_mode_enabled(vha)) {
2034 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2035 vha->hw->flags.exchoffld_enabled)
2036 eo_toggle = 1;
2037 if (((vha->ql2xexchoffld !=
2038 vha->u_ql2xexchoffld) &&
2039 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2040 eo_toggle)
2041 /*
2042 * The number of exchanges to be offloaded
2043 * was tweaked or the offload option was
2044 * flipped
2045 */
2046 action = MODE_CHANGE_ACCEPT;
2047 else
2048 action = NO_ACTION;
2049 } else
2050 action = NO_ACTION;
2051
2052 break;
2053
2054 case QLA2XXX_INI_MODE_DISABLED:
2055 if (qla_tgt_mode_enabled(vha)) {
2056 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2057 vha->hw->flags.exchoffld_enabled)
2058 eo_toggle = 1;
2059 if (((vha->ql2xexchoffld !=
2060 vha->u_ql2xexchoffld) &&
2061 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2062 eo_toggle)
2063 action = MODE_CHANGE_ACCEPT;
2064 else
2065 action = MODE_CHANGE_NO_ACTION;
2066 } else
2067 action = MODE_CHANGE_NO_ACTION;
2068 break;
2069
2070 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
2071 if (qla_tgt_mode_enabled(vha)) {
2072 action = MODE_CHANGE_ACCEPT;
2073 set_mode = 1;
2074 } else
2075 action = MODE_CHANGE_ACCEPT;
2076 break;
2077
2078 case QLA2XXX_INI_MODE_ENABLED:
2079 if (qla_tgt_mode_enabled(vha))
2080 action = TARGET_STILL_ACTIVE;
2081 else {
2082 if (vha->hw->flags.fw_started)
2083 action = MODE_CHANGE_NO_ACTION;
2084 else
2085 action = MODE_CHANGE_ACCEPT;
2086 }
2087 break;
2088 }
2089 break;
2090
2091 case QLA2XXX_INI_MODE_ENABLED:
2092 switch (op) {
2093 case QLA2XXX_INI_MODE_ENABLED:
2094 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
2095 vha->hw->flags.exchoffld_enabled)
2096 eo_toggle = 1;
2097 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
2098 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
2099 eo_toggle)
2100 action = MODE_CHANGE_ACCEPT;
2101 else
2102 action = NO_ACTION;
2103 break;
2104 case QLA2XXX_INI_MODE_DUAL:
2105 case QLA2XXX_INI_MODE_DISABLED:
2106 action = MODE_CHANGE_ACCEPT;
2107 break;
2108 default:
2109 action = MODE_CHANGE_NO_ACTION;
2110 break;
2111 }
2112 break;
2113
2114 case QLA2XXX_INI_MODE_DUAL:
2115 switch (op) {
2116 case QLA2XXX_INI_MODE_DUAL:
2117 if (qla_tgt_mode_enabled(vha) ||
2118 qla_dual_mode_enabled(vha)) {
2119 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2120 vha->u_ql2xiniexchg) !=
2121 vha->hw->flags.exchoffld_enabled)
2122 eo_toggle = 1;
2123
2124 if ((((vha->ql2xexchoffld +
2125 vha->ql2xiniexchg) !=
2126 (vha->u_ql2xiniexchg +
2127 vha->u_ql2xexchoffld)) &&
2128 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2129 vha->u_ql2xexchoffld)) || eo_toggle)
2130 action = MODE_CHANGE_ACCEPT;
2131 else
2132 action = NO_ACTION;
2133 } else {
2134 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2135 vha->u_ql2xiniexchg) !=
2136 vha->hw->flags.exchoffld_enabled)
2137 eo_toggle = 1;
2138
2139 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2140 != (vha->u_ql2xiniexchg +
2141 vha->u_ql2xexchoffld)) &&
2142 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2143 vha->u_ql2xexchoffld)) || eo_toggle)
2144 action = MODE_CHANGE_NO_ACTION;
2145 else
2146 action = NO_ACTION;
2147 }
2148 break;
2149
2150 case QLA2XXX_INI_MODE_DISABLED:
2151 if (qla_tgt_mode_enabled(vha) ||
2152 qla_dual_mode_enabled(vha)) {
2153 /* turning off initiator mode */
2154 set_mode = 1;
2155 action = MODE_CHANGE_ACCEPT;
2156 } else {
2157 action = MODE_CHANGE_NO_ACTION;
2158 }
2159 break;
2160
2161 case QLA2XXX_INI_MODE_EXCLUSIVE:
2162 if (qla_tgt_mode_enabled(vha) ||
2163 qla_dual_mode_enabled(vha)) {
2164 set_mode = 1;
2165 action = MODE_CHANGE_ACCEPT;
2166 } else {
2167 action = MODE_CHANGE_ACCEPT;
2168 }
2169 break;
2170
2171 case QLA2XXX_INI_MODE_ENABLED:
2172 if (qla_tgt_mode_enabled(vha) ||
2173 qla_dual_mode_enabled(vha)) {
2174 action = TARGET_STILL_ACTIVE;
2175 } else {
2176 action = MODE_CHANGE_ACCEPT;
2177 }
2178 }
2179 break;
2180 }
2181
2182 switch (action) {
2183 case MODE_CHANGE_ACCEPT:
2184 ql_log(ql_log_warn, vha, 0xffff,
2185 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2186 mode_to_str[vha->qlini_mode], mode_to_str[op],
2187 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2188 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2189
2190 vha->qlini_mode = op;
2191 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2192 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2193 if (set_mode)
2194 qlt_set_mode(vha);
2195 vha->flags.online = 1;
2196 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2197 break;
2198
2199 case MODE_CHANGE_NO_ACTION:
2200 ql_log(ql_log_warn, vha, 0xffff,
2201 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2202 mode_to_str[vha->qlini_mode], mode_to_str[op],
2203 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2204 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2205 vha->qlini_mode = op;
2206 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2207 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2208 break;
2209
2210 case TARGET_STILL_ACTIVE:
2211 ql_log(ql_log_warn, vha, 0xffff,
2212 "Target Mode is active. Unable to change Mode.\n");
2213 break;
2214
2215 case NO_ACTION:
2216 default:
2217 ql_log(ql_log_warn, vha, 0xffff,
2218 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2219 vha->qlini_mode, op,
2220 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2221 break;
2222 }
2223 }
2224
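/*
 * Parse the requested mode string ("exclusive", "disabled", "enabled" or
 * "dual", matched case-insensitively) and hand it to qla_set_ini_mode().
 * Illustrative usage (hostN is the SCSI host number):
 *   echo dual > /sys/class/scsi_host/hostN/qlini_mode
 */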
2225 static ssize_t
2226 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2227 const char *buf, size_t count)
2228 {
2229 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2230 int ini;
2231
2232 if (!buf)
2233 return -EINVAL;
2234
2235 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2236 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2237 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2238 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2239 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2240 ini = QLA2XXX_INI_MODE_DISABLED;
2241 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2242 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2243 ini = QLA2XXX_INI_MODE_ENABLED;
2244 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2245 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2246 ini = QLA2XXX_INI_MODE_DUAL;
2247 else
2248 return -EINVAL;
2249
2250 qla_set_ini_mode(vha, ini);
2251 return strlen(buf);
2252 }
2253
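/*
 * ql2xexchoffld and ql2xiniexchg stage new target/initiator exchange
 * counts in the u_* fields; values are clamped to
 * [0, FW_MAX_EXCHANGES_CNT] and only take effect after qlini_mode is
 * (re)written, as the _show handlers below point out. For instance:
 *   echo 2048 > /sys/class/scsi_host/hostN/ql2xexchoffld
 */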
2254 static ssize_t
2255 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2256 char *buf)
2257 {
2258 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2259 int len = 0;
2260
2261 len += scnprintf(buf + len, PAGE_SIZE-len,
2262 "target exchange: new %d : current: %d\n\n",
2263 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2264
2265 len += scnprintf(buf + len, PAGE_SIZE-len,
2266 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2267 vha->host_no);
2268
2269 return len;
2270 }
2271
2272 static ssize_t
2273 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2274 const char *buf, size_t count)
2275 {
2276 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2277 int val = 0;
2278
2279 if (sscanf(buf, "%d", &val) != 1)
2280 return -EINVAL;
2281
2282 if (val > FW_MAX_EXCHANGES_CNT)
2283 val = FW_MAX_EXCHANGES_CNT;
2284 else if (val < 0)
2285 val = 0;
2286
2287 vha->u_ql2xexchoffld = val;
2288 return strlen(buf);
2289 }
2290
2291 static ssize_t
2292 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294 {
2295 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2296 int len = 0;
2297
2298 len += scnprintf(buf + len, PAGE_SIZE-len,
2299 "target exchange: new %d : current: %d\n\n",
2300 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2301
2302 len += scnprintf(buf + len, PAGE_SIZE-len,
2303 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2304 vha->host_no);
2305
2306 return len;
2307 }
2308
2309 static ssize_t
2310 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2311 const char *buf, size_t count)
2312 {
2313 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2314 int val = 0;
2315
2316 if (sscanf(buf, "%d", &val) != 1)
2317 return -EINVAL;
2318
2319 if (val > FW_MAX_EXCHANGES_CNT)
2320 val = FW_MAX_EXCHANGES_CNT;
2321 else if (val < 0)
2322 val = 0;
2323
2324 vha->u_ql2xiniexchg = val;
2325 return strlen(buf);
2326 }
2327
2328 static ssize_t
2329 qla2x00_dif_bundle_statistics_show(struct device *dev,
2330 struct device_attribute *attr, char *buf)
2331 {
2332 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2333 struct qla_hw_data *ha = vha->hw;
2334
2335 return scnprintf(buf, PAGE_SIZE,
2336 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2337 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2338 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2339 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2340 }
2341
2342 static ssize_t
2343 qla2x00_fw_attr_show(struct device *dev,
2344 struct device_attribute *attr, char *buf)
2345 {
2346 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2347 struct qla_hw_data *ha = vha->hw;
2348
2349 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2350 return scnprintf(buf, PAGE_SIZE, "\n");
2351
2352 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2353 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2354 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2355 (uint64_t)ha->fw_attributes_h << 16 |
2356 (uint64_t)ha->fw_attributes);
2357 }
2358
2359 static ssize_t
2360 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2361 char *buf)
2362 {
2363 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2364
2365 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2366 }
2367
2368 static ssize_t
2369 qla2x00_dport_diagnostics_show(struct device *dev,
2370 struct device_attribute *attr, char *buf)
2371 {
2372 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2373
2374 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2375 !IS_QLA28XX(vha->hw))
2376 return scnprintf(buf, PAGE_SIZE, "\n");
2377
2378 if (!*vha->dport_data)
2379 return scnprintf(buf, PAGE_SIZE, "\n");
2380
2381 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
2382 vha->dport_data[0], vha->dport_data[1],
2383 vha->dport_data[2], vha->dport_data[3]);
2384 }
2385 static DEVICE_ATTR(dport_diagnostics, 0444,
2386 qla2x00_dport_diagnostics_show, NULL);
2387
2388 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2389 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2390 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2391 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2392 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2393 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2394 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2395 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2396 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2397 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2398 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2399 qla2x00_zio_timer_store);
2400 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2401 qla2x00_beacon_store);
2402 static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
2403 qla2x00_beacon_config_store);
2404 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2405 qla2x00_optrom_bios_version_show, NULL);
2406 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2407 qla2x00_optrom_efi_version_show, NULL);
2408 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2409 qla2x00_optrom_fcode_version_show, NULL);
2410 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2411 NULL);
2412 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2413 qla2x00_optrom_gold_fw_version_show, NULL);
2414 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2415 NULL);
2416 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2417 NULL);
2418 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2419 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2420 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2421 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2422 NULL);
2423 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2424 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2425 qla2x00_vn_port_mac_address_show, NULL);
2426 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2427 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2428 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2429 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2430 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2431 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2432 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2433 qla2x00_allow_cna_fw_dump_show,
2434 qla2x00_allow_cna_fw_dump_store);
2435 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2436 static DEVICE_ATTR(min_supported_speed, 0444,
2437 qla2x00_min_supported_speed_show, NULL);
2438 static DEVICE_ATTR(max_supported_speed, 0444,
2439 qla2x00_max_supported_speed_show, NULL);
2440 static DEVICE_ATTR(zio_threshold, 0644,
2441 qla_zio_threshold_show,
2442 qla_zio_threshold_store);
2443 static DEVICE_ATTR_RW(qlini_mode);
2444 static DEVICE_ATTR_RW(ql2xexchoffld);
2445 static DEVICE_ATTR_RW(ql2xiniexchg);
2446 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2447 qla2x00_dif_bundle_statistics_show, NULL);
2448 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2449 qla2x00_port_speed_store);
2450 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2451 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2452
2453
2454 struct device_attribute *qla2x00_host_attrs[] = {
2455 &dev_attr_driver_version,
2456 &dev_attr_fw_version,
2457 &dev_attr_serial_num,
2458 &dev_attr_isp_name,
2459 &dev_attr_isp_id,
2460 &dev_attr_model_name,
2461 &dev_attr_model_desc,
2462 &dev_attr_pci_info,
2463 &dev_attr_link_state,
2464 &dev_attr_zio,
2465 &dev_attr_zio_timer,
2466 &dev_attr_beacon,
2467 &dev_attr_beacon_config,
2468 &dev_attr_optrom_bios_version,
2469 &dev_attr_optrom_efi_version,
2470 &dev_attr_optrom_fcode_version,
2471 &dev_attr_optrom_fw_version,
2472 &dev_attr_84xx_fw_version,
2473 &dev_attr_total_isp_aborts,
2474 &dev_attr_serdes_version,
2475 &dev_attr_mpi_version,
2476 &dev_attr_phy_version,
2477 &dev_attr_flash_block_size,
2478 &dev_attr_vlan_id,
2479 &dev_attr_vn_port_mac_address,
2480 &dev_attr_fabric_param,
2481 &dev_attr_fw_state,
2482 &dev_attr_optrom_gold_fw_version,
2483 &dev_attr_thermal_temp,
2484 &dev_attr_diag_requests,
2485 &dev_attr_diag_megabytes,
2486 &dev_attr_fw_dump_size,
2487 &dev_attr_allow_cna_fw_dump,
2488 &dev_attr_pep_version,
2489 &dev_attr_min_supported_speed,
2490 &dev_attr_max_supported_speed,
2491 &dev_attr_zio_threshold,
2492 &dev_attr_dif_bundle_statistics,
2493 &dev_attr_port_speed,
2494 &dev_attr_port_no,
2495 &dev_attr_fw_attr,
2496 &dev_attr_dport_diagnostics,
2497 NULL, /* reserve for qlini_mode */
2498 NULL, /* reserve for ql2xiniexchg */
2499 NULL, /* reserve for ql2xexchoffld */
2500 NULL,
2501 };
2502
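/*
 * Append the target-mode attributes (qlini_mode, ql2xiniexchg,
 * ql2xexchoffld) into the NULL slots reserved at the end of
 * qla2x00_host_attrs[] above. The walk stops at the first empty slot, so
 * the three reserved entries must stay in place.
 */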
2503 void qla_insert_tgt_attrs(void)
2504 {
2505 struct device_attribute **attr;
2506
2507 /* advance to empty slot */
2508 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2509 continue;
2510
2511 *attr = &dev_attr_qlini_mode;
2512 attr++;
2513 *attr = &dev_attr_ql2xiniexchg;
2514 attr++;
2515 *attr = &dev_attr_ql2xexchoffld;
2516 }
2517
2518 /* Host attributes. */
2519
2520 static void
2521 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2522 {
2523 scsi_qla_host_t *vha = shost_priv(shost);
2524
2525 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2526 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2527 }
2528
2529 static void
2530 qla2x00_get_host_speed(struct Scsi_Host *shost)
2531 {
2532 scsi_qla_host_t *vha = shost_priv(shost);
2533 u32 speed;
2534
2535 if (IS_QLAFX00(vha->hw)) {
2536 qlafx00_get_host_speed(shost);
2537 return;
2538 }
2539
2540 switch (vha->hw->link_data_rate) {
2541 case PORT_SPEED_1GB:
2542 speed = FC_PORTSPEED_1GBIT;
2543 break;
2544 case PORT_SPEED_2GB:
2545 speed = FC_PORTSPEED_2GBIT;
2546 break;
2547 case PORT_SPEED_4GB:
2548 speed = FC_PORTSPEED_4GBIT;
2549 break;
2550 case PORT_SPEED_8GB:
2551 speed = FC_PORTSPEED_8GBIT;
2552 break;
2553 case PORT_SPEED_10GB:
2554 speed = FC_PORTSPEED_10GBIT;
2555 break;
2556 case PORT_SPEED_16GB:
2557 speed = FC_PORTSPEED_16GBIT;
2558 break;
2559 case PORT_SPEED_32GB:
2560 speed = FC_PORTSPEED_32GBIT;
2561 break;
2562 case PORT_SPEED_64GB:
2563 speed = FC_PORTSPEED_64GBIT;
2564 break;
2565 default:
2566 speed = FC_PORTSPEED_UNKNOWN;
2567 break;
2568 }
2569
2570 fc_host_speed(shost) = speed;
2571 }
2572
2573 static void
2574 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2575 {
2576 scsi_qla_host_t *vha = shost_priv(shost);
2577 uint32_t port_type;
2578
2579 if (vha->vp_idx) {
2580 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2581 return;
2582 }
2583 switch (vha->hw->current_topology) {
2584 case ISP_CFG_NL:
2585 port_type = FC_PORTTYPE_LPORT;
2586 break;
2587 case ISP_CFG_FL:
2588 port_type = FC_PORTTYPE_NLPORT;
2589 break;
2590 case ISP_CFG_N:
2591 port_type = FC_PORTTYPE_PTP;
2592 break;
2593 case ISP_CFG_F:
2594 port_type = FC_PORTTYPE_NPORT;
2595 break;
2596 default:
2597 port_type = FC_PORTTYPE_UNKNOWN;
2598 break;
2599 }
2600
2601 fc_host_port_type(shost) = port_type;
2602 }
2603
2604 static void
2605 qla2x00_get_starget_node_name(struct scsi_target *starget)
2606 {
2607 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2608 scsi_qla_host_t *vha = shost_priv(host);
2609 fc_port_t *fcport;
2610 u64 node_name = 0;
2611
2612 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2613 if (fcport->rport &&
2614 starget->id == fcport->rport->scsi_target_id) {
2615 node_name = wwn_to_u64(fcport->node_name);
2616 break;
2617 }
2618 }
2619
2620 fc_starget_node_name(starget) = node_name;
2621 }
2622
2623 static void
2624 qla2x00_get_starget_port_name(struct scsi_target *starget)
2625 {
2626 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2627 scsi_qla_host_t *vha = shost_priv(host);
2628 fc_port_t *fcport;
2629 u64 port_name = 0;
2630
2631 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2632 if (fcport->rport &&
2633 starget->id == fcport->rport->scsi_target_id) {
2634 port_name = wwn_to_u64(fcport->port_name);
2635 break;
2636 }
2637 }
2638
2639 fc_starget_port_name(starget) = port_name;
2640 }
2641
2642 static void
2643 qla2x00_get_starget_port_id(struct scsi_target *starget)
2644 {
2645 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2646 scsi_qla_host_t *vha = shost_priv(host);
2647 fc_port_t *fcport;
2648 uint32_t port_id = ~0U;
2649
2650 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2651 if (fcport->rport &&
2652 starget->id == fcport->rport->scsi_target_id) {
2653 port_id = fcport->d_id.b.domain << 16 |
2654 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2655 break;
2656 }
2657 }
2658
2659 fc_starget_port_id(starget) = port_id;
2660 }
2661
2662 static inline void
2663 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2664 {
2665 rport->dev_loss_tmo = timeout ? timeout : 1;
2666 }
2667
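/*
 * FC transport callback invoked when an rport's dev_loss timer expires
 * and the rport is being deleted: mark the fcport dead, drop the
 * fcport<->rport cross references under host_lock, and flush outstanding
 * commands if the PCI channel is already offline.
 */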
2668 static void
2669 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2670 {
2671 struct Scsi_Host *host = rport_to_shost(rport);
2672 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2673 unsigned long flags;
2674
2675 if (!fcport)
2676 return;
2677
2678 /* Now that the rport has been deleted, set the fcport state to
2679 FCS_DEVICE_DEAD */
2680 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2681
2682 /*
2683 * Transport has effectively 'deleted' the rport, clear
2684 * all local references.
2685 */
2686 spin_lock_irqsave(host->host_lock, flags);
2687 fcport->rport = fcport->drport = NULL;
2688 *((fc_port_t **)rport->dd_data) = NULL;
2689 spin_unlock_irqrestore(host->host_lock, flags);
2690
2691 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2692 return;
2693
2694 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2695 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2696 return;
2697 }
2698 }
2699
2700 static void
2701 qla2x00_terminate_rport_io(struct fc_rport *rport)
2702 {
2703 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2704
2705 if (!fcport)
2706 return;
2707
2708 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2709 return;
2710
2711 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2712 return;
2713
2714 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2715 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2716 return;
2717 }
2718 /*
2719 * At this point all fcport's software-states are cleared. Perform any
2720 * final cleanup of firmware resources (PCBs and XCBs).
2721 */
2722 if (fcport->loop_id != FC_NO_LOOP_ID) {
2723 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2724 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2725 fcport->loop_id, fcport->d_id.b.domain,
2726 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2727 else
2728 qla2x00_port_logout(fcport->vha, fcport);
2729 }
2730 }
2731
2732 static int
2733 qla2x00_issue_lip(struct Scsi_Host *shost)
2734 {
2735 scsi_qla_host_t *vha = shost_priv(shost);
2736
2737 if (IS_QLAFX00(vha->hw))
2738 return 0;
2739
2740 qla2x00_loop_reset(vha);
2741 return 0;
2742 }
2743
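/*
 * Build the fc_host_statistics snapshot: fields default to -1 (unknown),
 * firmware link statistics are fetched into a DMA-coherent buffer via
 * qla24xx_get_isp_stats()/qla2x00_get_link_status(), and I/O request and
 * byte counts are summed across the base queue pair, all other queue
 * pairs and the per-vha software counters.
 */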
2744 static struct fc_host_statistics *
2745 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2746 {
2747 scsi_qla_host_t *vha = shost_priv(shost);
2748 struct qla_hw_data *ha = vha->hw;
2749 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2750 int rval;
2751 struct link_statistics *stats;
2752 dma_addr_t stats_dma;
2753 struct fc_host_statistics *p = &vha->fc_host_stat;
2754 struct qla_qpair *qpair;
2755 int i;
2756 u64 ib = 0, ob = 0, ir = 0, or = 0;
2757
2758 memset(p, -1, sizeof(*p));
2759
2760 if (IS_QLAFX00(vha->hw))
2761 goto done;
2762
2763 if (test_bit(UNLOADING, &vha->dpc_flags))
2764 goto done;
2765
2766 if (unlikely(pci_channel_offline(ha->pdev)))
2767 goto done;
2768
2769 if (qla2x00_chip_is_down(vha))
2770 goto done;
2771
2772 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2773 GFP_KERNEL);
2774 if (!stats) {
2775 ql_log(ql_log_warn, vha, 0x707d,
2776 "Failed to allocate memory for stats.\n");
2777 goto done;
2778 }
2779
2780 rval = QLA_FUNCTION_FAILED;
2781 if (IS_FWI2_CAPABLE(ha)) {
2782 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2783 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2784 !ha->dpc_active) {
2785 /* Must be in a 'READY' state for statistics retrieval. */
2786 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2787 stats, stats_dma);
2788 }
2789
2790 if (rval != QLA_SUCCESS)
2791 goto done_free;
2792
2793 /* --- */
2794 for (i = 0; i < vha->hw->max_qpairs; i++) {
2795 qpair = vha->hw->queue_pair_map[i];
2796 if (!qpair)
2797 continue;
2798 ir += qpair->counters.input_requests;
2799 or += qpair->counters.output_requests;
2800 ib += qpair->counters.input_bytes;
2801 ob += qpair->counters.output_bytes;
2802 }
2803 ir += ha->base_qpair->counters.input_requests;
2804 or += ha->base_qpair->counters.output_requests;
2805 ib += ha->base_qpair->counters.input_bytes;
2806 ob += ha->base_qpair->counters.output_bytes;
2807
2808 ir += vha->qla_stats.input_requests;
2809 or += vha->qla_stats.output_requests;
2810 ib += vha->qla_stats.input_bytes;
2811 ob += vha->qla_stats.output_bytes;
2812 /* --- */
2813
2814 p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
2815 p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
2816 p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
2817 p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
2818 p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
2819 p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
2820 if (IS_FWI2_CAPABLE(ha)) {
2821 p->lip_count = le32_to_cpu(stats->lip_cnt);
2822 p->tx_frames = le32_to_cpu(stats->tx_frames);
2823 p->rx_frames = le32_to_cpu(stats->rx_frames);
2824 p->dumped_frames = le32_to_cpu(stats->discarded_frames);
2825 p->nos_count = le32_to_cpu(stats->nos_rcvd);
2826 p->error_frames =
2827 le32_to_cpu(stats->dropped_frames) +
2828 le32_to_cpu(stats->discarded_frames);
2829 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2830 p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
2831 p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
2832 } else {
2833 p->rx_words = ib >> 2;
2834 p->tx_words = ob >> 2;
2835 }
2836 }
2837
2838 p->fcp_control_requests = vha->qla_stats.control_requests;
2839 p->fcp_input_requests = ir;
2840 p->fcp_output_requests = or;
2841 p->fcp_input_megabytes = ib >> 20;
2842 p->fcp_output_megabytes = ob >> 20;
2843 p->seconds_since_last_reset =
2844 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2845 do_div(p->seconds_since_last_reset, HZ);
2846
2847 done_free:
2848 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2849 stats, stats_dma);
2850 done:
2851 return p;
2852 }
2853
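/*
 * Clear the software I/O counters for the host and all of its queue
 * pairs, then, on FWI2-capable adapters, ask the firmware to reset its
 * own link statistics via the BIT_0 option of the get-stats mailbox.
 */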
2854 static void
2855 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2856 {
2857 scsi_qla_host_t *vha = shost_priv(shost);
2858 struct qla_hw_data *ha = vha->hw;
2859 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2860 struct link_statistics *stats;
2861 dma_addr_t stats_dma;
2862 int i;
2863 struct qla_qpair *qpair;
2864
2865 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2866 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2867 for (i = 0; i < vha->hw->max_qpairs; i++) {
2868 qpair = vha->hw->queue_pair_map[i];
2869 if (!qpair)
2870 continue;
2871 memset(&qpair->counters, 0, sizeof(qpair->counters));
2872 }
2873 memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));
2874
2875 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2876
2877 if (IS_FWI2_CAPABLE(ha)) {
2878 int rval;
2879
2880 stats = dma_alloc_coherent(&ha->pdev->dev,
2881 sizeof(*stats), &stats_dma, GFP_KERNEL);
2882 if (!stats) {
2883 ql_log(ql_log_warn, vha, 0x70d7,
2884 "Failed to allocate memory for stats.\n");
2885 return;
2886 }
2887
2888 /* reset firmware statistics */
2889 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2890 if (rval != QLA_SUCCESS)
2891 ql_log(ql_log_warn, vha, 0x70de,
2892 "Resetting ISP statistics failed: rval = %d\n",
2893 rval);
2894
2895 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2896 stats, stats_dma);
2897 }
2898 }
2899
2900 static void
2901 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2902 {
2903 scsi_qla_host_t *vha = shost_priv(shost);
2904
2905 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2906 sizeof(fc_host_symbolic_name(shost)));
2907 }
2908
2909 static void
2910 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2911 {
2912 scsi_qla_host_t *vha = shost_priv(shost);
2913
2914 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2915 }
2916
2917 static void
2918 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2919 {
2920 scsi_qla_host_t *vha = shost_priv(shost);
2921 static const uint8_t node_name[WWN_SIZE] = {
2922 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2923 };
2924 u64 fabric_name = wwn_to_u64(node_name);
2925
2926 if (vha->device_flags & SWITCH_FOUND)
2927 fabric_name = wwn_to_u64(vha->fabric_node_name);
2928
2929 fc_host_fabric_name(shost) = fabric_name;
2930 }
2931
2932 static void
2933 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2934 {
2935 scsi_qla_host_t *vha = shost_priv(shost);
2936 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2937
2938 if (!base_vha->flags.online) {
2939 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2940 return;
2941 }
2942
2943 switch (atomic_read(&base_vha->loop_state)) {
2944 case LOOP_UPDATE:
2945 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2946 break;
2947 case LOOP_DOWN:
2948 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2949 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2950 else
2951 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2952 break;
2953 case LOOP_DEAD:
2954 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2955 break;
2956 case LOOP_READY:
2957 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2958 break;
2959 default:
2960 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2961 break;
2962 }
2963 }
2964
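/*
 * FC transport vport_create entry point for NPIV: validate the request,
 * allocate a virtual host, inherit DIF/DIX capabilities from the physical
 * port, register the new Scsi_Host, and, when NVRAM NPIV info carries a
 * QoS value and MQ support is enabled, give the vport its own queue pair.
 */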
2965 static int
2966 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2967 {
2968 int ret = 0;
2969 uint8_t qos = 0;
2970 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2971 scsi_qla_host_t *vha = NULL;
2972 struct qla_hw_data *ha = base_vha->hw;
2973 int cnt;
2974 struct req_que *req = ha->req_q_map[0];
2975 struct qla_qpair *qpair;
2976
2977 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2978 if (ret) {
2979 ql_log(ql_log_warn, vha, 0x707e,
2980 "Vport sanity check failed, status %x\n", ret);
2981 return (ret);
2982 }
2983
2984 vha = qla24xx_create_vhost(fc_vport);
2985 if (vha == NULL) {
2986 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2987 return FC_VPORT_FAILED;
2988 }
2989 if (disable) {
2990 atomic_set(&vha->vp_state, VP_OFFLINE);
2991 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2992 } else
2993 atomic_set(&vha->vp_state, VP_FAILED);
2994
2995 /* ready to create vport */
2996 ql_log(ql_log_info, vha, 0x7080,
2997 "VP entry id %d assigned.\n", vha->vp_idx);
2998
2999 /* initialized vport states */
3000 atomic_set(&vha->loop_state, LOOP_DOWN);
3001 vha->vp_err_state = VP_ERR_PORTDWN;
3002 vha->vp_prev_err_state = VP_ERR_UNKWN;
3003 /* Check if physical ha port is Up */
3004 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
3005 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
3006 /* Don't retry or attempt login of this virtual port */
3007 ql_dbg(ql_dbg_user, vha, 0x7081,
3008 "Vport loop state is not UP.\n");
3009 atomic_set(&vha->loop_state, LOOP_DEAD);
3010 if (!disable)
3011 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
3012 }
3013
3014 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3015 if (ha->fw_attributes & BIT_4) {
3016 int prot = 0, guard;
3017
3018 vha->flags.difdix_supported = 1;
3019 ql_dbg(ql_dbg_user, vha, 0x7082,
3020 "Registered for DIF/DIX type 1 and 3 protection.\n");
3021 if (ql2xenabledif == 1)
3022 prot = SHOST_DIX_TYPE0_PROTECTION;
3023 scsi_host_set_prot(vha->host,
3024 prot | SHOST_DIF_TYPE1_PROTECTION
3025 | SHOST_DIF_TYPE2_PROTECTION
3026 | SHOST_DIF_TYPE3_PROTECTION
3027 | SHOST_DIX_TYPE1_PROTECTION
3028 | SHOST_DIX_TYPE2_PROTECTION
3029 | SHOST_DIX_TYPE3_PROTECTION);
3030
3031 guard = SHOST_DIX_GUARD_CRC;
3032
3033 if (IS_PI_IPGUARD_CAPABLE(ha) &&
3034 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3035 guard |= SHOST_DIX_GUARD_IP;
3036
3037 scsi_host_set_guard(vha->host, guard);
3038 } else
3039 vha->flags.difdix_supported = 0;
3040 }
3041
3042 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
3043 &ha->pdev->dev)) {
3044 ql_dbg(ql_dbg_user, vha, 0x7083,
3045 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
3046 goto vport_create_failed_2;
3047 }
3048
3049 /* initialize attributes */
3050 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3051 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3052 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3053 fc_host_supported_classes(vha->host) =
3054 fc_host_supported_classes(base_vha->host);
3055 fc_host_supported_speeds(vha->host) =
3056 fc_host_supported_speeds(base_vha->host);
3057
3058 qlt_vport_create(vha, ha);
3059 qla24xx_vport_disable(fc_vport, disable);
3060
3061 if (!ql2xmqsupport || !ha->npiv_info)
3062 goto vport_queue;
3063
3064 /* Create a request queue in QoS mode for the vport */
3065 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
3066 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
3067 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
3068 8) == 0) {
3069 qos = ha->npiv_info[cnt].q_qos;
3070 break;
3071 }
3072 }
3073
3074 if (qos) {
3075 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
3076 if (!qpair)
3077 ql_log(ql_log_warn, vha, 0x7084,
3078 "Can't create qpair for VP[%d]\n",
3079 vha->vp_idx);
3080 else {
3081 ql_dbg(ql_dbg_multiq, vha, 0xc001,
3082 "Queue pair: %d Qos: %d) created for VP[%d]\n",
3083 qpair->id, qos, vha->vp_idx);
3084 ql_dbg(ql_dbg_user, vha, 0x7085,
3085 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
3086 qpair->id, qos, vha->vp_idx);
3087 req = qpair->req;
3088 vha->qpair = qpair;
3089 }
3090 }
3091
3092 vport_queue:
3093 vha->req = req;
3094 return 0;
3095
3096 vport_create_failed_2:
3097 qla24xx_disable_vp(vha);
3098 qla24xx_deallocate_vp_id(vha);
3099 scsi_host_put(vha->host);
3100 return FC_VPORT_FAILED;
3101 }
3102
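/*
 * Tear down an NPIV vport: wait for loop resync/fcport updates to settle,
 * disable the VP and wait for session deletion, detach NVMe and target
 * mode, remove the Scsi_Host, then release fcports, the GNL buffer, the
 * scan list and any dedicated queue pair before dropping the host
 * reference.
 */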
3103 static int
3104 qla24xx_vport_delete(struct fc_vport *fc_vport)
3105 {
3106 scsi_qla_host_t *vha = fc_vport->dd_data;
3107 struct qla_hw_data *ha = vha->hw;
3108 uint16_t id = vha->vp_idx;
3109
3110 set_bit(VPORT_DELETE, &vha->dpc_flags);
3111
3112 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
3113 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
3114 msleep(1000);
3115
3116
3117 qla24xx_disable_vp(vha);
3118 qla2x00_wait_for_sess_deletion(vha);
3119
3120 qla_nvme_delete(vha);
3121 vha->flags.delete_progress = 1;
3122
3123 qlt_remove_target(ha, vha);
3124
3125 fc_remove_host(vha->host);
3126
3127 scsi_remove_host(vha->host);
3128
3129 /* Allow timer to run to drain queued items, when removing vp */
3130 qla24xx_deallocate_vp_id(vha);
3131
3132 if (vha->timer_active) {
3133 qla2x00_vp_stop_timer(vha);
3134 ql_dbg(ql_dbg_user, vha, 0x7086,
3135 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
3136 }
3137
3138 qla2x00_free_fcports(vha);
3139
3140 mutex_lock(&ha->vport_lock);
3141 ha->cur_vport_count--;
3142 clear_bit(vha->vp_idx, ha->vp_idx_map);
3143 mutex_unlock(&ha->vport_lock);
3144
3145 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
3146 vha->gnl.ldma);
3147
3148 vha->gnl.l = NULL;
3149
3150 vfree(vha->scan.l);
3151
3152 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
3153 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
3154 ql_log(ql_log_warn, vha, 0x7087,
3155 "Queue Pair delete failed.\n");
3156 }
3157
3158 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
3159 scsi_host_put(vha->host);
3160 return 0;
3161 }
3162
3163 static int
3164 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
3165 {
3166 scsi_qla_host_t *vha = fc_vport->dd_data;
3167
3168 if (disable)
3169 qla24xx_disable_vp(vha);
3170 else
3171 qla24xx_enable_vp(vha);
3172
3173 return 0;
3174 }
3175
3176 struct fc_function_template qla2xxx_transport_functions = {
3177
3178 .show_host_node_name = 1,
3179 .show_host_port_name = 1,
3180 .show_host_supported_classes = 1,
3181 .show_host_supported_speeds = 1,
3182
3183 .get_host_port_id = qla2x00_get_host_port_id,
3184 .show_host_port_id = 1,
3185 .get_host_speed = qla2x00_get_host_speed,
3186 .show_host_speed = 1,
3187 .get_host_port_type = qla2x00_get_host_port_type,
3188 .show_host_port_type = 1,
3189 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3190 .show_host_symbolic_name = 1,
3191 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3192 .show_host_system_hostname = 1,
3193 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3194 .show_host_fabric_name = 1,
3195 .get_host_port_state = qla2x00_get_host_port_state,
3196 .show_host_port_state = 1,
3197
3198 .dd_fcrport_size = sizeof(struct fc_port *),
3199 .show_rport_supported_classes = 1,
3200
3201 .get_starget_node_name = qla2x00_get_starget_node_name,
3202 .show_starget_node_name = 1,
3203 .get_starget_port_name = qla2x00_get_starget_port_name,
3204 .show_starget_port_name = 1,
3205 .get_starget_port_id = qla2x00_get_starget_port_id,
3206 .show_starget_port_id = 1,
3207
3208 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3209 .show_rport_dev_loss_tmo = 1,
3210
3211 .issue_fc_host_lip = qla2x00_issue_lip,
3212 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3213 .terminate_rport_io = qla2x00_terminate_rport_io,
3214 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3215 .reset_fc_host_stats = qla2x00_reset_host_stats,
3216
3217 .vport_create = qla24xx_vport_create,
3218 .vport_disable = qla24xx_vport_disable,
3219 .vport_delete = qla24xx_vport_delete,
3220 .bsg_request = qla24xx_bsg_request,
3221 .bsg_timeout = qla24xx_bsg_timeout,
3222 };
3223
3224 struct fc_function_template qla2xxx_transport_vport_functions = {
3225
3226 .show_host_node_name = 1,
3227 .show_host_port_name = 1,
3228 .show_host_supported_classes = 1,
3229
3230 .get_host_port_id = qla2x00_get_host_port_id,
3231 .show_host_port_id = 1,
3232 .get_host_speed = qla2x00_get_host_speed,
3233 .show_host_speed = 1,
3234 .get_host_port_type = qla2x00_get_host_port_type,
3235 .show_host_port_type = 1,
3236 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3237 .show_host_symbolic_name = 1,
3238 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3239 .show_host_system_hostname = 1,
3240 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3241 .show_host_fabric_name = 1,
3242 .get_host_port_state = qla2x00_get_host_port_state,
3243 .show_host_port_state = 1,
3244
3245 .dd_fcrport_size = sizeof(struct fc_port *),
3246 .show_rport_supported_classes = 1,
3247
3248 .get_starget_node_name = qla2x00_get_starget_node_name,
3249 .show_starget_node_name = 1,
3250 .get_starget_port_name = qla2x00_get_starget_port_name,
3251 .show_starget_port_name = 1,
3252 .get_starget_port_id = qla2x00_get_starget_port_id,
3253 .show_starget_port_id = 1,
3254
3255 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3256 .show_rport_dev_loss_tmo = 1,
3257
3258 .issue_fc_host_lip = qla2x00_issue_lip,
3259 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3260 .terminate_rport_io = qla2x00_terminate_rport_io,
3261 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3262 .reset_fc_host_stats = qla2x00_reset_host_stats,
3263
3264 .bsg_request = qla24xx_bsg_request,
3265 .bsg_timeout = qla24xx_bsg_timeout,
3266 };
3267
3268 void
3269 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3270 {
3271 struct qla_hw_data *ha = vha->hw;
3272 u32 speeds = FC_PORTSPEED_UNKNOWN;
3273
3274 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3275 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3276 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3277 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3278 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3279 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3280 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3281
3282 speeds = qla25xx_fdmi_port_speed_capability(ha);
3283
3284 fc_host_supported_speeds(vha->host) = speeds;
3285 }
3286