1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13
14 static int qla24xx_vport_disable(struct fc_vport *, bool);
15
16 /* SYSFS attributes --------------------------------------------------------- */
17
18 static ssize_t
19 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20 struct bin_attribute *bin_attr,
21 char *buf, loff_t off, size_t count)
22 {
23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 struct device, kobj)));
25 struct qla_hw_data *ha = vha->hw;
26 int rval = 0;
27
28 if (ha->fw_dump_reading == 0)
29 return 0;
30
31 if (IS_QLA82XX(ha)) {
32 if (off < ha->md_template_size) {
33 rval = memory_read_from_buffer(buf, count,
34 &off, ha->md_tmplt_hdr, ha->md_template_size);
35 return rval;
36 }
37 off -= ha->md_template_size;
38 rval = memory_read_from_buffer(buf, count,
39 &off, ha->md_dump, ha->md_dump_size);
40 return rval;
41 } else
42 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
43 ha->fw_dump_len);
44 }
45
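/*
 * Control interface for the 'fw_dump' attribute.  The value written selects
 * an action:
 *   0 -- clear/release a previously exposed dump
 *   1 -- expose a captured firmware dump for reading
 *   2 -- (re)allocate the firmware dump buffer
 *   3 -- force a system error (ISP82xx: take reset ownership first)
 *   4 -- log whether MiniDump is supported (ISP82xx only)
 *   5 -- request an ISP abort (ISP82xx only)
 */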
46 static ssize_t
47 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
48 struct bin_attribute *bin_attr,
49 char *buf, loff_t off, size_t count)
50 {
51 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
52 struct device, kobj)));
53 struct qla_hw_data *ha = vha->hw;
54 int reading;
55
56 if (off != 0)
57 return (0);
58
59 reading = simple_strtol(buf, NULL, 10);
60 switch (reading) {
61 case 0:
62 if (!ha->fw_dump_reading)
63 break;
64
65 ql_log(ql_log_info, vha, 0x705d,
66 "Firmware dump cleared on (%ld).\n", vha->host_no);
67
68 if (IS_QLA82XX(vha->hw)) {
69 qla82xx_md_free(vha);
70 qla82xx_md_prep(vha);
71 }
72 ha->fw_dump_reading = 0;
73 ha->fw_dumped = 0;
74 break;
75 case 1:
76 if (ha->fw_dumped && !ha->fw_dump_reading) {
77 ha->fw_dump_reading = 1;
78
79 ql_log(ql_log_info, vha, 0x705e,
80 "Raw firmware dump ready for read on (%ld).\n",
81 vha->host_no);
82 }
83 break;
84 case 2:
85 qla2x00_alloc_fw_dump(vha);
86 break;
87 case 3:
88 if (IS_QLA82XX(ha)) {
89 qla82xx_idc_lock(ha);
90 qla82xx_set_reset_owner(vha);
91 qla82xx_idc_unlock(ha);
92 } else
93 qla2x00_system_error(vha);
94 break;
95 case 4:
96 if (IS_QLA82XX(ha)) {
97 if (ha->md_tmplt_hdr)
98 ql_dbg(ql_dbg_user, vha, 0x705b,
99 "MiniDump supported with this firmware.\n");
100 else
101 ql_dbg(ql_dbg_user, vha, 0x709d,
102 "MiniDump not supported with this firmware.\n");
103 }
104 break;
105 case 5:
106 if (IS_QLA82XX(ha))
107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
108 break;
109 }
110 return count;
111 }
112
113 static struct bin_attribute sysfs_fw_dump_attr = {
114 .attr = {
115 .name = "fw_dump",
116 .mode = S_IRUSR | S_IWUSR,
117 },
118 .size = 0,
119 .read = qla2x00_sysfs_read_fw_dump,
120 .write = qla2x00_sysfs_write_fw_dump,
121 };
122
123 static ssize_t
124 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
125 struct bin_attribute *bin_attr,
126 char *buf, loff_t off, size_t count)
127 {
128 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
129 struct device, kobj)));
130 struct qla_hw_data *ha = vha->hw;
131
132 if (!capable(CAP_SYS_ADMIN))
133 return 0;
134
135 if (IS_NOCACHE_VPD_TYPE(ha))
136 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
137 ha->nvram_size);
138 return memory_read_from_buffer(buf, count, &off, ha->nvram,
139 ha->nvram_size);
140 }
141
142 static ssize_t
143 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
144 struct bin_attribute *bin_attr,
145 char *buf, loff_t off, size_t count)
146 {
147 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
148 struct device, kobj)));
149 struct qla_hw_data *ha = vha->hw;
150 uint16_t cnt;
151
152 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
153 !ha->isp_ops->write_nvram)
154 return -EINVAL;
155
156 /* Checksum NVRAM. */
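/*
 * The final word (FWI2-capable ISPs) or byte (legacy ISPs) of the image
 * holds a two's-complement checksum so that the sum over the whole image
 * is zero; recompute it over the user-supplied data before writing.
 */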
157 if (IS_FWI2_CAPABLE(ha)) {
158 uint32_t *iter;
159 uint32_t chksum;
160
161 iter = (uint32_t *)buf;
162 chksum = 0;
163 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
164 chksum += le32_to_cpu(*iter++);
165 chksum = ~chksum + 1;
166 *iter = cpu_to_le32(chksum);
167 } else {
168 uint8_t *iter;
169 uint8_t chksum;
170
171 iter = (uint8_t *)buf;
172 chksum = 0;
173 for (cnt = 0; cnt < count - 1; cnt++)
174 chksum += *iter++;
175 chksum = ~chksum + 1;
176 *iter = chksum;
177 }
178
179 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
180 ql_log(ql_log_warn, vha, 0x705f,
181 "HBA not online, failing NVRAM update.\n");
182 return -EAGAIN;
183 }
184
185 /* Write NVRAM. */
186 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
187 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
188 count);
189
190 ql_dbg(ql_dbg_user, vha, 0x7060,
191 "Setting ISP_ABORT_NEEDED\n");
192 /* NVRAM settings take effect immediately. */
193 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
194 qla2xxx_wake_dpc(vha);
195 qla2x00_wait_for_chip_reset(vha);
196
197 return count;
198 }
199
200 static struct bin_attribute sysfs_nvram_attr = {
201 .attr = {
202 .name = "nvram",
203 .mode = S_IRUSR | S_IWUSR,
204 },
205 .size = 512,
206 .read = qla2x00_sysfs_read_nvram,
207 .write = qla2x00_sysfs_write_nvram,
208 };
209
210 static ssize_t
211 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
212 struct bin_attribute *bin_attr,
213 char *buf, loff_t off, size_t count)
214 {
215 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
216 struct device, kobj)));
217 struct qla_hw_data *ha = vha->hw;
218
219 if (ha->optrom_state != QLA_SREADING)
220 return 0;
221
222 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
223 ha->optrom_region_size);
224 }
225
226 static ssize_t
227 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
228 struct bin_attribute *bin_attr,
229 char *buf, loff_t off, size_t count)
230 {
231 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
232 struct device, kobj)));
233 struct qla_hw_data *ha = vha->hw;
234
235 if (ha->optrom_state != QLA_SWRITING)
236 return -EINVAL;
237 if (off > ha->optrom_region_size)
238 return -ERANGE;
239 if (off + count > ha->optrom_region_size)
240 count = ha->optrom_region_size - off;
241
242 memcpy(&ha->optrom_buffer[off], buf, count);
243
244 return count;
245 }
246
247 static struct bin_attribute sysfs_optrom_attr = {
248 .attr = {
249 .name = "optrom",
250 .mode = S_IRUSR | S_IWUSR,
251 },
252 .size = 0,
253 .read = qla2x00_sysfs_read_optrom,
254 .write = qla2x00_sysfs_write_optrom,
255 };
256
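/*
 * Control interface for staged option-ROM access.  Userspace writes
 * "<cmd>[:<start>:<size>]" where cmd is:
 *   0 -- free the staging buffer
 *   1 -- read the flash region [start, start+size) into the staging buffer
 *   2 -- allocate a staging buffer for a subsequent write of that region
 *   3 -- commit the staged data to flash
 * The data itself is transferred through the companion 'optrom' attribute.
 */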
257 static ssize_t
258 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
259 struct bin_attribute *bin_attr,
260 char *buf, loff_t off, size_t count)
261 {
262 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
263 struct device, kobj)));
264 struct qla_hw_data *ha = vha->hw;
265
266 uint32_t start = 0;
267 uint32_t size = ha->optrom_size;
268 int val, valid;
269
270 if (off)
271 return -EINVAL;
272
273 if (unlikely(pci_channel_offline(ha->pdev)))
274 return -EAGAIN;
275
276 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
277 return -EINVAL;
278 if (start > ha->optrom_size)
279 return -EINVAL;
280
281 switch (val) {
282 case 0:
283 if (ha->optrom_state != QLA_SREADING &&
284 ha->optrom_state != QLA_SWRITING)
285 return -EINVAL;
286
287 ha->optrom_state = QLA_SWAITING;
288
289 ql_dbg(ql_dbg_user, vha, 0x7061,
290 "Freeing flash region allocation -- 0x%x bytes.\n",
291 ha->optrom_region_size);
292
293 vfree(ha->optrom_buffer);
294 ha->optrom_buffer = NULL;
295 break;
296 case 1:
297 if (ha->optrom_state != QLA_SWAITING)
298 return -EINVAL;
299
300 ha->optrom_region_start = start;
301 ha->optrom_region_size = start + size > ha->optrom_size ?
302 ha->optrom_size - start : size;
303
304 ha->optrom_state = QLA_SREADING;
305 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
306 if (ha->optrom_buffer == NULL) {
307 ql_log(ql_log_warn, vha, 0x7062,
308 "Unable to allocate memory for optrom retrieval "
309 "(%x).\n", ha->optrom_region_size);
310
311 ha->optrom_state = QLA_SWAITING;
312 return -ENOMEM;
313 }
314
315 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
316 ql_log(ql_log_warn, vha, 0x7063,
317 "HBA not online, failing NVRAM update.\n");
318 return -EAGAIN;
319 }
320
321 ql_dbg(ql_dbg_user, vha, 0x7064,
322 "Reading flash region -- 0x%x/0x%x.\n",
323 ha->optrom_region_start, ha->optrom_region_size);
324
325 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
326 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
327 ha->optrom_region_start, ha->optrom_region_size);
328 break;
329 case 2:
330 if (ha->optrom_state != QLA_SWAITING)
331 return -EINVAL;
332
333 /*
334 * We need to be more restrictive on which FLASH regions are
335 * allowed to be updated via user-space. Regions accessible
336 * via this method include:
337 *
338 * ISP21xx/ISP22xx/ISP23xx type boards:
339 *
340 * 0x000000 -> 0x020000 -- Boot code.
341 *
342 * ISP2322/ISP24xx type boards:
343 *
344 * 0x000000 -> 0x07ffff -- Boot code.
345 * 0x080000 -> 0x0fffff -- Firmware.
346 *
347 * ISP25xx type boards:
348 *
349 * 0x000000 -> 0x07ffff -- Boot code.
350 * 0x080000 -> 0x0fffff -- Firmware.
351 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
352 */
353 valid = 0;
354 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
355 valid = 1;
356 else if (start == (ha->flt_region_boot * 4) ||
357 start == (ha->flt_region_fw * 4))
358 valid = 1;
359 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
360 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
361 valid = 1;
362 if (!valid) {
363 ql_log(ql_log_warn, vha, 0x7065,
364 "Invalid start region 0x%x/0x%x.\n", start, size);
365 return -EINVAL;
366 }
367
368 ha->optrom_region_start = start;
369 ha->optrom_region_size = start + size > ha->optrom_size ?
370 ha->optrom_size - start : size;
371
372 ha->optrom_state = QLA_SWRITING;
373 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
374 if (ha->optrom_buffer == NULL) {
375 ql_log(ql_log_warn, vha, 0x7066,
376 "Unable to allocate memory for optrom update "
377 "(%x)\n", ha->optrom_region_size);
378
379 ha->optrom_state = QLA_SWAITING;
380 return -ENOMEM;
381 }
382
383 ql_dbg(ql_dbg_user, vha, 0x7067,
384 "Staging flash region write -- 0x%x/0x%x.\n",
385 ha->optrom_region_start, ha->optrom_region_size);
386
387 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
388 break;
389 case 3:
390 if (ha->optrom_state != QLA_SWRITING)
391 return -EINVAL;
392
393 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
394 ql_log(ql_log_warn, vha, 0x7068,
395 "HBA not online, failing flash update.\n");
396 return -EAGAIN;
397 }
398
399 ql_dbg(ql_dbg_user, vha, 0x7069,
400 "Writing flash region -- 0x%x/0x%x.\n",
401 ha->optrom_region_start, ha->optrom_region_size);
402
403 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
404 ha->optrom_region_start, ha->optrom_region_size);
405 break;
406 default:
407 return -EINVAL;
408 }
409 return count;
410 }
411
412 static struct bin_attribute sysfs_optrom_ctl_attr = {
413 .attr = {
414 .name = "optrom_ctl",
415 .mode = S_IWUSR,
416 },
417 .size = 0,
418 .write = qla2x00_sysfs_write_optrom_ctl,
419 };
420
421 static ssize_t
422 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
423 struct bin_attribute *bin_attr,
424 char *buf, loff_t off, size_t count)
425 {
426 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
427 struct device, kobj)));
428 struct qla_hw_data *ha = vha->hw;
429
430 if (unlikely(pci_channel_offline(ha->pdev)))
431 return -EAGAIN;
432
433 if (!capable(CAP_SYS_ADMIN))
434 return -EINVAL;
435
436 if (IS_NOCACHE_VPD_TYPE(ha))
437 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
438 ha->vpd_size);
439 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
440 }
441
442 static ssize_t
443 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
444 struct bin_attribute *bin_attr,
445 char *buf, loff_t off, size_t count)
446 {
447 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
448 struct device, kobj)));
449 struct qla_hw_data *ha = vha->hw;
450 uint8_t *tmp_data;
451
452 if (unlikely(pci_channel_offline(ha->pdev)))
453 return 0;
454
455 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
456 !ha->isp_ops->write_nvram)
457 return 0;
458
459 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
460 ql_log(ql_log_warn, vha, 0x706a,
461 "HBA not online, failing VPD update.\n");
462 return -EAGAIN;
463 }
464
465 /* Write NVRAM. */
466 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
467 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
468
469 /* Update flash version information for 4Gb & above. */
470 if (!IS_FWI2_CAPABLE(ha))
471 return -EINVAL;
472
473 tmp_data = vmalloc(256);
474 if (!tmp_data) {
475 ql_log(ql_log_warn, vha, 0x706b,
476 "Unable to allocate memory for VPD information update.\n");
477 return -ENOMEM;
478 }
479 ha->isp_ops->get_flash_version(vha, tmp_data);
480 vfree(tmp_data);
481
482 return count;
483 }
484
485 static struct bin_attribute sysfs_vpd_attr = {
486 .attr = {
487 .name = "vpd",
488 .mode = S_IRUSR | S_IWUSR,
489 },
490 .size = 0,
491 .read = qla2x00_sysfs_read_vpd,
492 .write = qla2x00_sysfs_write_vpd,
493 };
494
495 static ssize_t
496 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
497 struct bin_attribute *bin_attr,
498 char *buf, loff_t off, size_t count)
499 {
500 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
501 struct device, kobj)));
502 struct qla_hw_data *ha = vha->hw;
503 uint16_t iter, addr, offset;
504 int rval;
505
506 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
507 return 0;
508
509 if (ha->sfp_data)
510 goto do_read;
511
512 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
513 &ha->sfp_data_dma);
514 if (!ha->sfp_data) {
515 ql_log(ql_log_warn, vha, 0x706c,
516 "Unable to allocate memory for SFP read-data.\n");
517 return 0;
518 }
519
520 do_read:
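/*
 * Fetch the SFP image in SFP_BLOCK_SIZE chunks: the first four blocks are
 * read from two-wire device address 0xa0, the remainder from 0xa2.
 */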
521 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
522 addr = 0xa0;
523 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
524 iter++, offset += SFP_BLOCK_SIZE) {
525 if (iter == 4) {
526 /* Skip to next device address. */
527 addr = 0xa2;
528 offset = 0;
529 }
530
531 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
532 addr, offset, SFP_BLOCK_SIZE, 0);
533 if (rval != QLA_SUCCESS) {
534 ql_log(ql_log_warn, vha, 0x706d,
535 "Unable to read SFP data (%x/%x/%x).\n", rval,
536 addr, offset);
537
538 return -EIO;
539 }
540 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
541 buf += SFP_BLOCK_SIZE;
542 }
543
544 return count;
545 }
546
547 static struct bin_attribute sysfs_sfp_attr = {
548 .attr = {
549 .name = "sfp",
550 .mode = S_IRUSR | S_IWUSR,
551 },
552 .size = SFP_DEV_SIZE * 2,
553 .read = qla2x00_sysfs_read_sfp,
554 };
555
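/*
 * The 'reset' attribute accepts magic command codes:
 *   0x2025c -- full ISP reset
 *   0x2025d -- MPI firmware reset (ISP81xx only)
 *   0x2025e -- FCoE context reset (ISP82xx base port only)
 */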
556 static ssize_t
557 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
558 struct bin_attribute *bin_attr,
559 char *buf, loff_t off, size_t count)
560 {
561 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
562 struct device, kobj)));
563 struct qla_hw_data *ha = vha->hw;
564 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
565 int type;
566
567 if (off != 0)
568 return -EINVAL;
569
570 type = simple_strtol(buf, NULL, 10);
571 switch (type) {
572 case 0x2025c:
573 ql_log(ql_log_info, vha, 0x706e,
574 "Issuing ISP reset.\n");
575
576 scsi_block_requests(vha->host);
577 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578 if (IS_QLA82XX(ha)) {
579 qla82xx_idc_lock(ha);
580 qla82xx_set_reset_owner(vha);
581 qla82xx_idc_unlock(ha);
582 }
583 qla2xxx_wake_dpc(vha);
584 qla2x00_wait_for_chip_reset(vha);
585 scsi_unblock_requests(vha->host);
586 break;
587 case 0x2025d:
588 if (!IS_QLA81XX(ha))
589 return -EPERM;
590
591 ql_log(ql_log_info, vha, 0x706f,
592 "Issuing MPI reset.\n");
593
594 /* Make sure FC side is not in reset */
595 qla2x00_wait_for_hba_online(vha);
596
597 /* Issue MPI reset */
598 scsi_block_requests(vha->host);
599 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
600 ql_log(ql_log_warn, vha, 0x7070,
601 "MPI reset failed.\n");
602 scsi_unblock_requests(vha->host);
603 break;
604 case 0x2025e:
605 if (!IS_QLA82XX(ha) || vha != base_vha) {
606 ql_log(ql_log_info, vha, 0x7071,
607 "FCoE ctx reset no supported.\n");
608 return -EPERM;
609 }
610
611 ql_log(ql_log_info, vha, 0x7072,
612 "Issuing FCoE ctx reset.\n");
613 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
614 qla2xxx_wake_dpc(vha);
615 qla2x00_wait_for_fcoe_ctx_reset(vha);
616 break;
617 }
618 return count;
619 }
620
621 static struct bin_attribute sysfs_reset_attr = {
622 .attr = {
623 .name = "reset",
624 .mode = S_IWUSR,
625 },
626 .size = 0,
627 .write = qla2x00_sysfs_write_reset,
628 };
629
630 static ssize_t
631 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
632 struct bin_attribute *bin_attr,
633 char *buf, loff_t off, size_t count)
634 {
635 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
636 struct device, kobj)));
637 struct qla_hw_data *ha = vha->hw;
638 int rval;
639 uint16_t actual_size;
640
641 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
642 return 0;
643
644 if (ha->xgmac_data)
645 goto do_read;
646
647 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
648 &ha->xgmac_data_dma, GFP_KERNEL);
649 if (!ha->xgmac_data) {
650 ql_log(ql_log_warn, vha, 0x7076,
651 "Unable to allocate memory for XGMAC read-data.\n");
652 return 0;
653 }
654
655 do_read:
656 actual_size = 0;
657 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
658
659 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
660 XGMAC_DATA_SIZE, &actual_size);
661 if (rval != QLA_SUCCESS) {
662 ql_log(ql_log_warn, vha, 0x7077,
663 "Unable to read XGMAC data (%x).\n", rval);
664 count = 0;
665 }
666
667 count = actual_size > count ? count : actual_size;
668 memcpy(buf, ha->xgmac_data, count);
669
670 return count;
671 }
672
673 static struct bin_attribute sysfs_xgmac_stats_attr = {
674 .attr = {
675 .name = "xgmac_stats",
676 .mode = S_IRUSR,
677 },
678 .size = 0,
679 .read = qla2x00_sysfs_read_xgmac_stats,
680 };
681
682 static ssize_t
683 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
684 struct bin_attribute *bin_attr,
685 char *buf, loff_t off, size_t count)
686 {
687 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
688 struct device, kobj)));
689 struct qla_hw_data *ha = vha->hw;
690 int rval;
691 uint16_t actual_size;
692
693 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
694 return 0;
695
696 if (ha->dcbx_tlv)
697 goto do_read;
698
699 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
700 &ha->dcbx_tlv_dma, GFP_KERNEL);
701 if (!ha->dcbx_tlv) {
702 ql_log(ql_log_warn, vha, 0x7078,
703 "Unable to allocate memory for DCBX TLV read-data.\n");
704 return -ENOMEM;
705 }
706
707 do_read:
708 actual_size = 0;
709 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
710
711 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
712 DCBX_TLV_DATA_SIZE);
713 if (rval != QLA_SUCCESS) {
714 ql_log(ql_log_warn, vha, 0x7079,
715 "Unable to read DCBX TLV (%x).\n", rval);
716 return -EIO;
717 }
718
719 memcpy(buf, ha->dcbx_tlv, count);
720
721 return count;
722 }
723
724 static struct bin_attribute sysfs_dcbx_tlv_attr = {
725 .attr = {
726 .name = "dcbx_tlv",
727 .mode = S_IRUSR,
728 },
729 .size = 0,
730 .read = qla2x00_sysfs_read_dcbx_tlv,
731 };
732
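/*
 * Binary sysfs attributes created per host.  The is4GBp_only field gates
 * creation in qla2x00_alloc_sysfs_attr(): any non-zero value requires an
 * FWI2-capable ISP, 2 additionally requires ISP25xx, and 3 requires a
 * CNA-capable adapter.
 */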
733 static struct sysfs_entry {
734 char *name;
735 struct bin_attribute *attr;
736 int is4GBp_only;
737 } bin_file_entries[] = {
738 { "fw_dump", &sysfs_fw_dump_attr, },
739 { "nvram", &sysfs_nvram_attr, },
740 { "optrom", &sysfs_optrom_attr, },
741 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
742 { "vpd", &sysfs_vpd_attr, 1 },
743 { "sfp", &sysfs_sfp_attr, 1 },
744 { "reset", &sysfs_reset_attr, },
745 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
746 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
747 { NULL },
748 };
749
750 void
751 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
752 {
753 struct Scsi_Host *host = vha->host;
754 struct sysfs_entry *iter;
755 int ret;
756
757 for (iter = bin_file_entries; iter->name; iter++) {
758 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
759 continue;
760 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
761 continue;
762 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
763 continue;
764
765 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
766 iter->attr);
767 if (ret)
768 ql_log(ql_log_warn, vha, 0x00f3,
769 "Unable to create sysfs %s binary attribute (%d).\n",
770 iter->name, ret);
771 else
772 ql_dbg(ql_dbg_init, vha, 0x00f4,
773 "Successfully created sysfs %s binary attribure.\n",
774 iter->name);
775 }
776 }
777
778 void
779 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
780 {
781 struct Scsi_Host *host = vha->host;
782 struct sysfs_entry *iter;
783 struct qla_hw_data *ha = vha->hw;
784
785 for (iter = bin_file_entries; iter->name; iter++) {
786 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
787 continue;
788 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
789 continue;
790 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
791 continue;
792
793 sysfs_remove_bin_file(&host->shost_gendev.kobj,
794 iter->attr);
795 }
796
797 if (ha->beacon_blink_led == 1)
798 ha->isp_ops->beacon_off(vha);
799 }
800
801 /* Scsi_Host attributes. */
802
803 static ssize_t
804 qla2x00_drvr_version_show(struct device *dev,
805 struct device_attribute *attr, char *buf)
806 {
807 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
808 }
809
810 static ssize_t
811 qla2x00_fw_version_show(struct device *dev,
812 struct device_attribute *attr, char *buf)
813 {
814 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
815 struct qla_hw_data *ha = vha->hw;
816 char fw_str[128];
817
818 return snprintf(buf, PAGE_SIZE, "%s\n",
819 ha->isp_ops->fw_version_str(vha, fw_str));
820 }
821
822 static ssize_t
823 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
824 char *buf)
825 {
826 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
827 struct qla_hw_data *ha = vha->hw;
828 uint32_t sn;
829
830 if (IS_FWI2_CAPABLE(ha)) {
831 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
832 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
833 }
834
835 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
836 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
837 sn % 100000);
838 }
839
840 static ssize_t
841 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
842 char *buf)
843 {
844 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
845 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
846 }
847
848 static ssize_t
849 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
850 char *buf)
851 {
852 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
853 struct qla_hw_data *ha = vha->hw;
854 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
855 ha->product_id[0], ha->product_id[1], ha->product_id[2],
856 ha->product_id[3]);
857 }
858
859 static ssize_t
860 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
861 char *buf)
862 {
863 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
864 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
865 }
866
867 static ssize_t
868 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
869 char *buf)
870 {
871 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
872 return snprintf(buf, PAGE_SIZE, "%s\n",
873 vha->hw->model_desc ? vha->hw->model_desc : "");
874 }
875
876 static ssize_t
877 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
878 char *buf)
879 {
880 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
881 char pci_info[30];
882
883 return snprintf(buf, PAGE_SIZE, "%s\n",
884 vha->hw->isp_ops->pci_info_str(vha, pci_info));
885 }
886
887 static ssize_t
888 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
889 char *buf)
890 {
891 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
892 struct qla_hw_data *ha = vha->hw;
893 int len = 0;
894
895 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
896 atomic_read(&vha->loop_state) == LOOP_DEAD ||
897 vha->device_flags & DFLG_NO_CABLE)
898 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
899 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
900 qla2x00_reset_active(vha))
901 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
902 else {
903 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
904
905 switch (ha->current_topology) {
906 case ISP_CFG_NL:
907 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
908 break;
909 case ISP_CFG_FL:
910 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
911 break;
912 case ISP_CFG_N:
913 len += snprintf(buf + len, PAGE_SIZE-len,
914 "N_Port to N_Port\n");
915 break;
916 case ISP_CFG_F:
917 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
918 break;
919 default:
920 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
921 break;
922 }
923 }
924 return len;
925 }
926
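/* 'zio' reports and controls Zero-Interrupt Operation: mode 6 or disabled. */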
927 static ssize_t
928 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
929 char *buf)
930 {
931 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
932 int len = 0;
933
934 switch (vha->hw->zio_mode) {
935 case QLA_ZIO_MODE_6:
936 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
937 break;
938 case QLA_ZIO_DISABLED:
939 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
940 break;
941 }
942 return len;
943 }
944
945 static ssize_t
946 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
947 const char *buf, size_t count)
948 {
949 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
950 struct qla_hw_data *ha = vha->hw;
951 int val = 0;
952 uint16_t zio_mode;
953
954 if (!IS_ZIO_SUPPORTED(ha))
955 return -ENOTSUPP;
956
957 if (sscanf(buf, "%d", &val) != 1)
958 return -EINVAL;
959
960 if (val)
961 zio_mode = QLA_ZIO_MODE_6;
962 else
963 zio_mode = QLA_ZIO_DISABLED;
964
965 /* Update per-hba values and queue a reset. */
966 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
967 ha->zio_mode = zio_mode;
968 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
969 }
970 return strlen(buf);
971 }
972
973 static ssize_t
974 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
975 char *buf)
976 {
977 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
978
979 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
980 }
981
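/* 'zio_timer' is specified in microseconds (100-25500) and stored in 100us units. */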
982 static ssize_t
983 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
984 const char *buf, size_t count)
985 {
986 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
987 int val = 0;
988 uint16_t zio_timer;
989
990 if (sscanf(buf, "%d", &val) != 1)
991 return -EINVAL;
992 if (val > 25500 || val < 100)
993 return -ERANGE;
994
995 zio_timer = (uint16_t)(val / 100);
996 vha->hw->zio_timer = zio_timer;
997
998 return strlen(buf);
999 }
1000
1001 static ssize_t
1002 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1003 char *buf)
1004 {
1005 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1006 int len = 0;
1007
1008 if (vha->hw->beacon_blink_led)
1009 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1010 else
1011 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1012 return len;
1013 }
1014
1015 static ssize_t
1016 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1017 const char *buf, size_t count)
1018 {
1019 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1020 struct qla_hw_data *ha = vha->hw;
1021 int val = 0;
1022 int rval;
1023
1024 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1025 return -EPERM;
1026
1027 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1028 ql_log(ql_log_warn, vha, 0x707a,
1029 "Abort ISP active -- ignoring beacon request.\n");
1030 return -EBUSY;
1031 }
1032
1033 if (sscanf(buf, "%d", &val) != 1)
1034 return -EINVAL;
1035
1036 if (val)
1037 rval = ha->isp_ops->beacon_on(vha);
1038 else
1039 rval = ha->isp_ops->beacon_off(vha);
1040
1041 if (rval != QLA_SUCCESS)
1042 count = 0;
1043
1044 return count;
1045 }
1046
1047 static ssize_t
1048 qla2x00_optrom_bios_version_show(struct device *dev,
1049 struct device_attribute *attr, char *buf)
1050 {
1051 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1052 struct qla_hw_data *ha = vha->hw;
1053 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1054 ha->bios_revision[0]);
1055 }
1056
1057 static ssize_t
1058 qla2x00_optrom_efi_version_show(struct device *dev,
1059 struct device_attribute *attr, char *buf)
1060 {
1061 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062 struct qla_hw_data *ha = vha->hw;
1063 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1064 ha->efi_revision[0]);
1065 }
1066
1067 static ssize_t
1068 qla2x00_optrom_fcode_version_show(struct device *dev,
1069 struct device_attribute *attr, char *buf)
1070 {
1071 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1072 struct qla_hw_data *ha = vha->hw;
1073 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1074 ha->fcode_revision[0]);
1075 }
1076
1077 static ssize_t
1078 qla2x00_optrom_fw_version_show(struct device *dev,
1079 struct device_attribute *attr, char *buf)
1080 {
1081 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1082 struct qla_hw_data *ha = vha->hw;
1083 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1084 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1085 ha->fw_revision[3]);
1086 }
1087
1088 static ssize_t
1089 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1090 struct device_attribute *attr, char *buf)
1091 {
1092 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1093 struct qla_hw_data *ha = vha->hw;
1094
1095 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1096 return snprintf(buf, PAGE_SIZE, "\n");
1097
1098 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1099 ha->gold_fw_version[0], ha->gold_fw_version[1],
1100 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1101 }
1102
1103 static ssize_t
1104 qla2x00_total_isp_aborts_show(struct device *dev,
1105 struct device_attribute *attr, char *buf)
1106 {
1107 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 struct qla_hw_data *ha = vha->hw;
1109 return snprintf(buf, PAGE_SIZE, "%d\n",
1110 ha->qla_stats.total_isp_aborts);
1111 }
1112
1113 static ssize_t
1114 qla24xx_84xx_fw_version_show(struct device *dev,
1115 struct device_attribute *attr, char *buf)
1116 {
1117 int rval = QLA_SUCCESS;
1118 uint16_t status[2] = {0, 0};
1119 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1120 struct qla_hw_data *ha = vha->hw;
1121
1122 if (!IS_QLA84XX(ha))
1123 return snprintf(buf, PAGE_SIZE, "\n");
1124
1125 if (ha->cs84xx->op_fw_version == 0)
1126 rval = qla84xx_verify_chip(vha, status);
1127
1128 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1129 return snprintf(buf, PAGE_SIZE, "%u\n",
1130 (uint32_t)ha->cs84xx->op_fw_version);
1131
1132 return snprintf(buf, PAGE_SIZE, "\n");
1133 }
1134
1135 static ssize_t
1136 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1137 char *buf)
1138 {
1139 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1140 struct qla_hw_data *ha = vha->hw;
1141
1142 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1143 return snprintf(buf, PAGE_SIZE, "\n");
1144
1145 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1146 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1147 ha->mpi_capabilities);
1148 }
1149
1150 static ssize_t
1151 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1152 char *buf)
1153 {
1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1155 struct qla_hw_data *ha = vha->hw;
1156
1157 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1158 return snprintf(buf, PAGE_SIZE, "\n");
1159
1160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1161 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1162 }
1163
1164 static ssize_t
1165 qla2x00_flash_block_size_show(struct device *dev,
1166 struct device_attribute *attr, char *buf)
1167 {
1168 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1169 struct qla_hw_data *ha = vha->hw;
1170
1171 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1172 }
1173
1174 static ssize_t
1175 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1176 char *buf)
1177 {
1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1179
1180 if (!IS_CNA_CAPABLE(vha->hw))
1181 return snprintf(buf, PAGE_SIZE, "\n");
1182
1183 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1184 }
1185
1186 static ssize_t
1187 qla2x00_vn_port_mac_address_show(struct device *dev,
1188 struct device_attribute *attr, char *buf)
1189 {
1190 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1191
1192 if (!IS_CNA_CAPABLE(vha->hw))
1193 return snprintf(buf, PAGE_SIZE, "\n");
1194
1195 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1196 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1197 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1198 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1199 }
1200
1201 static ssize_t
1202 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1203 char *buf)
1204 {
1205 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1206
1207 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1208 }
1209
1210 static ssize_t
1211 qla2x00_thermal_temp_show(struct device *dev,
1212 struct device_attribute *attr, char *buf)
1213 {
1214 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1215 int rval = QLA_FUNCTION_FAILED;
1216 uint16_t temp, frac;
1217
1218 if (!vha->hw->flags.thermal_supported)
1219 return snprintf(buf, PAGE_SIZE, "\n");
1220
1221 temp = frac = 0;
1222 if (qla2x00_reset_active(vha))
1223 ql_log(ql_log_warn, vha, 0x707b,
1224 "ISP reset active.\n");
1225 else if (!vha->hw->flags.eeh_busy)
1226 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1227 if (rval != QLA_SUCCESS)
1228 return snprintf(buf, PAGE_SIZE, "\n");
1229
1230 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1231 }
1232
1233 static ssize_t
1234 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1235 char *buf)
1236 {
1237 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1238 int rval = QLA_FUNCTION_FAILED;
1239 uint16_t state[5];
1240
1241 if (qla2x00_reset_active(vha))
1242 ql_log(ql_log_warn, vha, 0x707c,
1243 "ISP reset active.\n");
1244 else if (!vha->hw->flags.eeh_busy)
1245 rval = qla2x00_get_firmware_state(vha, state);
1246 if (rval != QLA_SUCCESS)
1247 memset(state, -1, sizeof(state));
1248
1249 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1250 state[1], state[2], state[3], state[4]);
1251 }
1252
1253 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1254 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1255 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1256 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1257 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1258 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1259 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1260 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1261 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1262 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1263 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1264 qla2x00_zio_timer_store);
1265 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1266 qla2x00_beacon_store);
1267 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1268 qla2x00_optrom_bios_version_show, NULL);
1269 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1270 qla2x00_optrom_efi_version_show, NULL);
1271 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1272 qla2x00_optrom_fcode_version_show, NULL);
1273 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1274 NULL);
1275 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1276 qla2x00_optrom_gold_fw_version_show, NULL);
1277 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1278 NULL);
1279 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1280 NULL);
1281 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1282 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1283 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1284 NULL);
1285 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1286 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1287 qla2x00_vn_port_mac_address_show, NULL);
1288 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1289 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1290 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1291
1292 struct device_attribute *qla2x00_host_attrs[] = {
1293 &dev_attr_driver_version,
1294 &dev_attr_fw_version,
1295 &dev_attr_serial_num,
1296 &dev_attr_isp_name,
1297 &dev_attr_isp_id,
1298 &dev_attr_model_name,
1299 &dev_attr_model_desc,
1300 &dev_attr_pci_info,
1301 &dev_attr_link_state,
1302 &dev_attr_zio,
1303 &dev_attr_zio_timer,
1304 &dev_attr_beacon,
1305 &dev_attr_optrom_bios_version,
1306 &dev_attr_optrom_efi_version,
1307 &dev_attr_optrom_fcode_version,
1308 &dev_attr_optrom_fw_version,
1309 &dev_attr_84xx_fw_version,
1310 &dev_attr_total_isp_aborts,
1311 &dev_attr_mpi_version,
1312 &dev_attr_phy_version,
1313 &dev_attr_flash_block_size,
1314 &dev_attr_vlan_id,
1315 &dev_attr_vn_port_mac_address,
1316 &dev_attr_fabric_param,
1317 &dev_attr_fw_state,
1318 &dev_attr_optrom_gold_fw_version,
1319 &dev_attr_thermal_temp,
1320 NULL,
1321 };
1322
1323 /* Host attributes. */
1324
1325 static void
1326 qla2x00_get_host_port_id(struct Scsi_Host *shost)
1327 {
1328 scsi_qla_host_t *vha = shost_priv(shost);
1329
1330 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1331 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1332 }
1333
1334 static void
1335 qla2x00_get_host_speed(struct Scsi_Host *shost)
1336 {
1337 struct qla_hw_data *ha = ((struct scsi_qla_host *)
1338 (shost_priv(shost)))->hw;
1339 u32 speed = FC_PORTSPEED_UNKNOWN;
1340
1341 switch (ha->link_data_rate) {
1342 case PORT_SPEED_1GB:
1343 speed = FC_PORTSPEED_1GBIT;
1344 break;
1345 case PORT_SPEED_2GB:
1346 speed = FC_PORTSPEED_2GBIT;
1347 break;
1348 case PORT_SPEED_4GB:
1349 speed = FC_PORTSPEED_4GBIT;
1350 break;
1351 case PORT_SPEED_8GB:
1352 speed = FC_PORTSPEED_8GBIT;
1353 break;
1354 case PORT_SPEED_10GB:
1355 speed = FC_PORTSPEED_10GBIT;
1356 break;
1357 case PORT_SPEED_16GB:
1358 speed = FC_PORTSPEED_16GBIT;
1359 break;
1360 }
1361 fc_host_speed(shost) = speed;
1362 }
1363
1364 static void
1365 qla2x00_get_host_port_type(struct Scsi_Host *shost)
1366 {
1367 scsi_qla_host_t *vha = shost_priv(shost);
1368 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1369
1370 if (vha->vp_idx) {
1371 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1372 return;
1373 }
1374 switch (vha->hw->current_topology) {
1375 case ISP_CFG_NL:
1376 port_type = FC_PORTTYPE_LPORT;
1377 break;
1378 case ISP_CFG_FL:
1379 port_type = FC_PORTTYPE_NLPORT;
1380 break;
1381 case ISP_CFG_N:
1382 port_type = FC_PORTTYPE_PTP;
1383 break;
1384 case ISP_CFG_F:
1385 port_type = FC_PORTTYPE_NPORT;
1386 break;
1387 }
1388 fc_host_port_type(shost) = port_type;
1389 }
1390
1391 static void
1392 qla2x00_get_starget_node_name(struct scsi_target *starget)
1393 {
1394 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1395 scsi_qla_host_t *vha = shost_priv(host);
1396 fc_port_t *fcport;
1397 u64 node_name = 0;
1398
1399 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1400 if (fcport->rport &&
1401 starget->id == fcport->rport->scsi_target_id) {
1402 node_name = wwn_to_u64(fcport->node_name);
1403 break;
1404 }
1405 }
1406
1407 fc_starget_node_name(starget) = node_name;
1408 }
1409
1410 static void
1411 qla2x00_get_starget_port_name(struct scsi_target *starget)
1412 {
1413 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1414 scsi_qla_host_t *vha = shost_priv(host);
1415 fc_port_t *fcport;
1416 u64 port_name = 0;
1417
1418 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1419 if (fcport->rport &&
1420 starget->id == fcport->rport->scsi_target_id) {
1421 port_name = wwn_to_u64(fcport->port_name);
1422 break;
1423 }
1424 }
1425
1426 fc_starget_port_name(starget) = port_name;
1427 }
1428
1429 static void
1430 qla2x00_get_starget_port_id(struct scsi_target *starget)
1431 {
1432 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1433 scsi_qla_host_t *vha = shost_priv(host);
1434 fc_port_t *fcport;
1435 uint32_t port_id = ~0U;
1436
1437 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1438 if (fcport->rport &&
1439 starget->id == fcport->rport->scsi_target_id) {
1440 port_id = fcport->d_id.b.domain << 16 |
1441 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1442 break;
1443 }
1444 }
1445
1446 fc_starget_port_id(starget) = port_id;
1447 }
1448
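/* A zero dev_loss_tmo from the transport is treated as one second. */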
1449 static void
1450 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1451 {
1452 if (timeout)
1453 rport->dev_loss_tmo = timeout;
1454 else
1455 rport->dev_loss_tmo = 1;
1456 }
1457
1458 static void
1459 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1460 {
1461 struct Scsi_Host *host = rport_to_shost(rport);
1462 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1463 unsigned long flags;
1464
1465 if (!fcport)
1466 return;
1467
1468 /* Now that the rport has been deleted, set the fcport state to
1469 FCS_DEVICE_DEAD */
1470 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1471
1472 /*
1473 * Transport has effectively 'deleted' the rport, clear
1474 * all local references.
1475 */
1476 spin_lock_irqsave(host->host_lock, flags);
1477 fcport->rport = fcport->drport = NULL;
1478 *((fc_port_t **)rport->dd_data) = NULL;
1479 spin_unlock_irqrestore(host->host_lock, flags);
1480
1481 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1482 return;
1483
1484 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1485 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1486 return;
1487 }
1488 }
1489
1490 static void
1491 qla2x00_terminate_rport_io(struct fc_rport *rport)
1492 {
1493 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1494
1495 if (!fcport)
1496 return;
1497
1498 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1499 return;
1500
1501 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1502 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1503 return;
1504 }
1505 /*
1506 * At this point all fcport's software-states are cleared. Perform any
1507 * final cleanup of firmware resources (PCBs and XCBs).
1508 */
1509 if (fcport->loop_id != FC_NO_LOOP_ID &&
1510 !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
1511 if (IS_FWI2_CAPABLE(fcport->vha->hw))
1512 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1513 fcport->loop_id, fcport->d_id.b.domain,
1514 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1515 else
1516 qla2x00_port_logout(fcport->vha, fcport);
1517 }
1518 }
1519
1520 static int
1521 qla2x00_issue_lip(struct Scsi_Host *shost)
1522 {
1523 scsi_qla_host_t *vha = shost_priv(shost);
1524
1525 qla2x00_loop_reset(vha);
1526 return 0;
1527 }
1528
1529 static struct fc_host_statistics *
1530 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1531 {
1532 scsi_qla_host_t *vha = shost_priv(shost);
1533 struct qla_hw_data *ha = vha->hw;
1534 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1535 int rval;
1536 struct link_statistics *stats;
1537 dma_addr_t stats_dma;
1538 struct fc_host_statistics *pfc_host_stat;
1539
1540 pfc_host_stat = &ha->fc_host_stat;
1541 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1542
1543 if (test_bit(UNLOADING, &vha->dpc_flags))
1544 goto done;
1545
1546 if (unlikely(pci_channel_offline(ha->pdev)))
1547 goto done;
1548
1549 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1550 if (stats == NULL) {
1551 ql_log(ql_log_warn, vha, 0x707d,
1552 "Failed to allocate memory for stats.\n");
1553 goto done;
1554 }
1555 memset(stats, 0, DMA_POOL_SIZE);
1556
1557 rval = QLA_FUNCTION_FAILED;
1558 if (IS_FWI2_CAPABLE(ha)) {
1559 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1560 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1561 !qla2x00_reset_active(vha) && !ha->dpc_active) {
1562 /* Must be in a 'READY' state for statistics retrieval. */
1563 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1564 stats, stats_dma);
1565 }
1566
1567 if (rval != QLA_SUCCESS)
1568 goto done_free;
1569
1570 pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1571 pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1572 pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1573 pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1574 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1575 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1576 if (IS_FWI2_CAPABLE(ha)) {
1577 pfc_host_stat->lip_count = stats->lip_cnt;
1578 pfc_host_stat->tx_frames = stats->tx_frames;
1579 pfc_host_stat->rx_frames = stats->rx_frames;
1580 pfc_host_stat->dumped_frames = stats->dumped_frames;
1581 pfc_host_stat->nos_count = stats->nos_rcvd;
1582 }
1583 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1584 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1585
1586 done_free:
1587 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1588 done:
1589 return pfc_host_stat;
1590 }
1591
1592 static void
1593 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1594 {
1595 scsi_qla_host_t *vha = shost_priv(shost);
1596
1597 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1598 }
1599
1600 static void
1601 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1602 {
1603 scsi_qla_host_t *vha = shost_priv(shost);
1604
1605 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1606 }
1607
1608 static void
1609 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1610 {
1611 scsi_qla_host_t *vha = shost_priv(shost);
1612 uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
1613 0xFF, 0xFF, 0xFF, 0xFF};
1614 u64 fabric_name = wwn_to_u64(node_name);
1615
1616 if (vha->device_flags & SWITCH_FOUND)
1617 fabric_name = wwn_to_u64(vha->fabric_node_name);
1618
1619 fc_host_fabric_name(shost) = fabric_name;
1620 }
1621
1622 static void
1623 qla2x00_get_host_port_state(struct Scsi_Host *shost)
1624 {
1625 scsi_qla_host_t *vha = shost_priv(shost);
1626 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1627
1628 if (!base_vha->flags.online) {
1629 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1630 return;
1631 }
1632
1633 switch (atomic_read(&base_vha->loop_state)) {
1634 case LOOP_UPDATE:
1635 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1636 break;
1637 case LOOP_DOWN:
1638 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
1639 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1640 else
1641 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1642 break;
1643 case LOOP_DEAD:
1644 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1645 break;
1646 case LOOP_READY:
1647 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1648 break;
1649 default:
1650 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1651 break;
1652 }
1653 }
1654
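/*
 * NPIV vport creation: sanity-check the request, create the vport's
 * scsi_qla_host, propagate DIF/DIX capabilities, register the SCSI host,
 * then attach a request queue (the CPU-affinity queue, a QoS queue matching
 * the NPIV table entry, or the default queue 0).
 */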
1655 static int
1656 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1657 {
1658 int ret = 0;
1659 uint8_t qos = 0;
1660 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1661 scsi_qla_host_t *vha = NULL;
1662 struct qla_hw_data *ha = base_vha->hw;
1663 uint16_t options = 0;
1664 int cnt;
1665 struct req_que *req = ha->req_q_map[0];
1666
1667 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1668 if (ret) {
1669 ql_log(ql_log_warn, vha, 0x707e,
1670 "Vport sanity check failed, status %x\n", ret);
1671 return (ret);
1672 }
1673
1674 vha = qla24xx_create_vhost(fc_vport);
1675 if (vha == NULL) {
1676 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1677 return FC_VPORT_FAILED;
1678 }
1679 if (disable) {
1680 atomic_set(&vha->vp_state, VP_OFFLINE);
1681 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1682 } else
1683 atomic_set(&vha->vp_state, VP_FAILED);
1684
1685 /* ready to create vport */
1686 ql_log(ql_log_info, vha, 0x7080,
1687 "VP entry id %d assigned.\n", vha->vp_idx);
1688
1689 /* initialized vport states */
1690 atomic_set(&vha->loop_state, LOOP_DOWN);
1691 vha->vp_err_state = VP_ERR_PORTDWN;
1692 vha->vp_prev_err_state = VP_ERR_UNKWN;
1693 /* Check if physical ha port is Up */
1694 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1695 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1696 /* Don't retry or attempt login of this virtual port */
1697 ql_dbg(ql_dbg_user, vha, 0x7081,
1698 "Vport loop state is not UP.\n");
1699 atomic_set(&vha->loop_state, LOOP_DEAD);
1700 if (!disable)
1701 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1702 }
1703
1704 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1705 if (ha->fw_attributes & BIT_4) {
1706 int prot = 0;
1707 vha->flags.difdix_supported = 1;
1708 ql_dbg(ql_dbg_user, vha, 0x7082,
1709 "Registered for DIF/DIX type 1 and 3 protection.\n");
1710 if (ql2xenabledif == 1)
1711 prot = SHOST_DIX_TYPE0_PROTECTION;
1712 scsi_host_set_prot(vha->host,
1713 prot | SHOST_DIF_TYPE1_PROTECTION
1714 | SHOST_DIF_TYPE2_PROTECTION
1715 | SHOST_DIF_TYPE3_PROTECTION
1716 | SHOST_DIX_TYPE1_PROTECTION
1717 | SHOST_DIX_TYPE2_PROTECTION
1718 | SHOST_DIX_TYPE3_PROTECTION);
1719 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1720 } else
1721 vha->flags.difdix_supported = 0;
1722 }
1723
1724 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1725 &ha->pdev->dev)) {
1726 ql_dbg(ql_dbg_user, vha, 0x7083,
1727 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1728 goto vport_create_failed_2;
1729 }
1730
1731 /* initialize attributes */
1732 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1733 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1734 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1735 fc_host_supported_classes(vha->host) =
1736 fc_host_supported_classes(base_vha->host);
1737 fc_host_supported_speeds(vha->host) =
1738 fc_host_supported_speeds(base_vha->host);
1739
1740 qla24xx_vport_disable(fc_vport, disable);
1741
1742 if (ha->flags.cpu_affinity_enabled) {
1743 req = ha->req_q_map[1];
1744 ql_dbg(ql_dbg_multiq, vha, 0xc000,
1745 "Request queue %p attached with "
1746 "VP[%d], cpu affinity =%d\n",
1747 req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1748 goto vport_queue;
1749 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1750 goto vport_queue;
1751 /* Create a request queue in QoS mode for the vport */
1752 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1753 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1754 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1755 8) == 0) {
1756 qos = ha->npiv_info[cnt].q_qos;
1757 break;
1758 }
1759 }
1760
1761 if (qos) {
1762 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1763 qos);
1764 if (!ret)
1765 ql_log(ql_log_warn, vha, 0x7084,
1766 "Can't create request queue for VP[%d]\n",
1767 vha->vp_idx);
1768 else {
1769 ql_dbg(ql_dbg_multiq, vha, 0xc001,
1770 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1771 ret, qos, vha->vp_idx);
1772 ql_dbg(ql_dbg_user, vha, 0x7085,
1773 "Request Que:%d Q0s: %d) created for VP[%d]\n",
1774 ret, qos, vha->vp_idx);
1775 req = ha->req_q_map[ret];
1776 }
1777 }
1778
1779 vport_queue:
1780 vha->req = req;
1781 return 0;
1782
1783 vport_create_failed_2:
1784 qla24xx_disable_vp(vha);
1785 qla24xx_deallocate_vp_id(vha);
1786 scsi_host_put(vha->host);
1787 return FC_VPORT_FAILED;
1788 }
1789
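/*
 * qla24xx_vport_delete() - FC transport vport_delete callback.
 * @fc_vport: virtual port being torn down
 *
 * Waits for loop resync and fcport updates to finish, disables the
 * vport, unregisters it from the FC transport and SCSI midlayer, frees
 * its fcports and, if a dedicated request queue was created, deletes
 * that queue before dropping the final host reference.
 */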
1790 static int
1791 qla24xx_vport_delete(struct fc_vport *fc_vport)
1792 {
1793 scsi_qla_host_t *vha = fc_vport->dd_data;
1794 struct qla_hw_data *ha = vha->hw;
1795 uint16_t id = vha->vp_idx;
1796
1797 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1798 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1799 msleep(1000);
1800
1801 qla24xx_disable_vp(vha);
1802
1803 vha->flags.delete_progress = 1;
1804
1805 fc_remove_host(vha->host);
1806
1807 scsi_remove_host(vha->host);
1808
1809 /* Allow timer to run to drain queued items when removing the vp */
1810 qla24xx_deallocate_vp_id(vha);
1811
1812 if (vha->timer_active) {
1813 qla2x00_vp_stop_timer(vha);
1814 ql_dbg(ql_dbg_user, vha, 0x7086,
1815 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1816 }
1817
1818 /* There should be no pending activity left on the vha now */
1819 if (ql2xextended_error_logging & ql_dbg_user)
1820 msleep(random32()%10); /* Just to see if something falls on
1821 * the net we have placed below */
1822
1823 BUG_ON(atomic_read(&vha->vref_count));
1824
1825 qla2x00_free_fcports(vha);
1826
1827 mutex_lock(&ha->vport_lock);
1828 ha->cur_vport_count--;
1829 clear_bit(vha->vp_idx, ha->vp_idx_map);
1830 mutex_unlock(&ha->vport_lock);
1831
1832 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1833 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1834 ql_log(ql_log_warn, vha, 0x7087,
1835 "Queue delete failed.\n");
1836 }
1837
1838 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1839 scsi_host_put(vha->host);
1840 return 0;
1841 }
1842
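/*
 * qla24xx_vport_disable() - FC transport vport_disable callback.
 * @fc_vport: virtual port to enable or disable
 * @disable: true to disable the vport, false to (re)enable it
 */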
1843 static int
1844 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1845 {
1846 scsi_qla_host_t *vha = fc_vport->dd_data;
1847
1848 if (disable)
1849 qla24xx_disable_vp(vha);
1850 else
1851 qla24xx_enable_vp(vha);
1852
1853 return 0;
1854 }
1855
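/*
 * FC transport template for physical ports: exports the host, rport and
 * starget attributes and wires up LIP, statistics, BSG and NPIV vport
 * management callbacks.
 */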
1856 struct fc_function_template qla2xxx_transport_functions = {
1857
1858 .show_host_node_name = 1,
1859 .show_host_port_name = 1,
1860 .show_host_supported_classes = 1,
1861 .show_host_supported_speeds = 1,
1862
1863 .get_host_port_id = qla2x00_get_host_port_id,
1864 .show_host_port_id = 1,
1865 .get_host_speed = qla2x00_get_host_speed,
1866 .show_host_speed = 1,
1867 .get_host_port_type = qla2x00_get_host_port_type,
1868 .show_host_port_type = 1,
1869 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1870 .show_host_symbolic_name = 1,
1871 .set_host_system_hostname = qla2x00_set_host_system_hostname,
1872 .show_host_system_hostname = 1,
1873 .get_host_fabric_name = qla2x00_get_host_fabric_name,
1874 .show_host_fabric_name = 1,
1875 .get_host_port_state = qla2x00_get_host_port_state,
1876 .show_host_port_state = 1,
1877
1878 .dd_fcrport_size = sizeof(struct fc_port *),
1879 .show_rport_supported_classes = 1,
1880
1881 .get_starget_node_name = qla2x00_get_starget_node_name,
1882 .show_starget_node_name = 1,
1883 .get_starget_port_name = qla2x00_get_starget_port_name,
1884 .show_starget_port_name = 1,
1885 .get_starget_port_id = qla2x00_get_starget_port_id,
1886 .show_starget_port_id = 1,
1887
1888 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1889 .show_rport_dev_loss_tmo = 1,
1890
1891 .issue_fc_host_lip = qla2x00_issue_lip,
1892 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1893 .terminate_rport_io = qla2x00_terminate_rport_io,
1894 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1895
1896 .vport_create = qla24xx_vport_create,
1897 .vport_disable = qla24xx_vport_disable,
1898 .vport_delete = qla24xx_vport_delete,
1899 .bsg_request = qla24xx_bsg_request,
1900 .bsg_timeout = qla24xx_bsg_timeout,
1901 };
1902
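/*
 * FC transport template for NPIV virtual ports: same attribute and BSG
 * callbacks as the physical-port template, minus vport management and
 * the supported-speeds host attribute.
 */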
1903 struct fc_function_template qla2xxx_transport_vport_functions = {
1904
1905 .show_host_node_name = 1,
1906 .show_host_port_name = 1,
1907 .show_host_supported_classes = 1,
1908
1909 .get_host_port_id = qla2x00_get_host_port_id,
1910 .show_host_port_id = 1,
1911 .get_host_speed = qla2x00_get_host_speed,
1912 .show_host_speed = 1,
1913 .get_host_port_type = qla2x00_get_host_port_type,
1914 .show_host_port_type = 1,
1915 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1916 .show_host_symbolic_name = 1,
1917 .set_host_system_hostname = qla2x00_set_host_system_hostname,
1918 .show_host_system_hostname = 1,
1919 .get_host_fabric_name = qla2x00_get_host_fabric_name,
1920 .show_host_fabric_name = 1,
1921 .get_host_port_state = qla2x00_get_host_port_state,
1922 .show_host_port_state = 1,
1923
1924 .dd_fcrport_size = sizeof(struct fc_port *),
1925 .show_rport_supported_classes = 1,
1926
1927 .get_starget_node_name = qla2x00_get_starget_node_name,
1928 .show_starget_node_name = 1,
1929 .get_starget_port_name = qla2x00_get_starget_port_name,
1930 .show_starget_port_name = 1,
1931 .get_starget_port_id = qla2x00_get_starget_port_id,
1932 .show_starget_port_id = 1,
1933
1934 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1935 .show_rport_dev_loss_tmo = 1,
1936
1937 .issue_fc_host_lip = qla2x00_issue_lip,
1938 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1939 .terminate_rport_io = qla2x00_terminate_rport_io,
1940 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1941 .bsg_request = qla24xx_bsg_request,
1942 .bsg_timeout = qla24xx_bsg_timeout,
1943 };
1944
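/*
 * qla2x00_init_host_attr() - Populate the FC host attributes exported via
 * the transport class: WWNN/WWPN, dev_loss_tmo, supported classes, NPIV
 * limits and the supported link speeds derived from the ISP type.
 */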
1945 void
1946 qla2x00_init_host_attr(scsi_qla_host_t *vha)
1947 {
1948 struct qla_hw_data *ha = vha->hw;
1949 u32 speed = FC_PORTSPEED_UNKNOWN;
1950
1951 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1952 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1953 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1954 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
1955 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1956 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1957
1958 if (IS_CNA_CAPABLE(ha))
1959 speed = FC_PORTSPEED_10GBIT;
1960 else if (IS_QLA25XX(ha))
1961 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1962 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1963 else if (IS_QLA24XX_TYPE(ha))
1964 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
1965 FC_PORTSPEED_1GBIT;
1966 else if (IS_QLA23XX(ha))
1967 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1968 else
1969 speed = FC_PORTSPEED_1GBIT;
1970 fc_host_supported_speeds(vha->host) = speed;
1971 }
1972