
Searched refs:ha (Results 1 – 25 of 431) sorted by relevance


/kernel/linux/linux-5.10/drivers/scsi/qla4xxx/
ql4_init.c
13 static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) in ql4xxx_set_mac_number() argument
19 spin_lock_irqsave(&ha->hardware_lock, flags); in ql4xxx_set_mac_number()
20 value = readw(&ha->reg->ctrl_status); in ql4xxx_set_mac_number()
21 spin_unlock_irqrestore(&ha->hardware_lock, flags); in ql4xxx_set_mac_number()
25 ha->mac_index = 1; in ql4xxx_set_mac_number()
28 ha->mac_index = 3; in ql4xxx_set_mac_number()
32 "ispControlStatus = 0x%x\n", ha->host_no, in ql4xxx_set_mac_number()
36 DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__, in ql4xxx_set_mac_number()
37 ha->mac_index)); in ql4xxx_set_mac_number()
47 void qla4xxx_free_ddb(struct scsi_qla_host *ha, in qla4xxx_free_ddb() argument
[all …]
ql4_isr.c
18 static void qla4xxx_copy_sense(struct scsi_qla_host *ha, in qla4xxx_copy_sense() argument
28 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:" in qla4xxx_copy_sense()
29 " sense len 0\n", ha->host_no, in qla4xxx_copy_sense()
32 ha->status_srb = NULL; in qla4xxx_copy_sense()
46 "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, in qla4xxx_copy_sense()
61 ha->status_srb = srb; in qla4xxx_copy_sense()
63 ha->status_srb = NULL; in qla4xxx_copy_sense()
74 qla4xxx_status_cont_entry(struct scsi_qla_host *ha, in qla4xxx_status_cont_entry() argument
77 struct srb *srb = ha->status_srb; in qla4xxx_status_cont_entry()
87 "back to OS srb=%p srb->state:%d\n", ha->host_no, in qla4xxx_status_cont_entry()
[all …]
ql4_83xx.c
15 uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr) in qla4_83xx_rd_reg() argument
17 return readl((void __iomem *)(ha->nx_pcibase + addr)); in qla4_83xx_rd_reg()
20 void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val) in qla4_83xx_wr_reg() argument
22 writel(val, (void __iomem *)(ha->nx_pcibase + addr)); in qla4_83xx_wr_reg()
25 static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr) in qla4_83xx_set_win_base() argument
30 qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr); in qla4_83xx_set_win_base()
31 val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num)); in qla4_83xx_set_win_base()
33 ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", in qla4_83xx_set_win_base()
41 int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, in qla4_83xx_rd_reg_indirect() argument
46 ret_val = qla4_83xx_set_win_base(ha, addr); in qla4_83xx_rd_reg_indirect()
[all …]
ql4_glbl.h
12 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
14 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
15 int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
16 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
19 void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
20 void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
22 int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
23 int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
24 int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
26 int qla4xxx_reset_target(struct scsi_qla_host *ha,
[all …]
ql4_nx.c
38 qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off) in qla4_8xxx_pci_base_offsetfset() argument
40 if ((off < ha->first_page_group_end) && in qla4_8xxx_pci_base_offsetfset()
41 (off >= ha->first_page_group_start)) in qla4_8xxx_pci_base_offsetfset()
42 return (void __iomem *)(ha->nx_pcibase + off); in qla4_8xxx_pci_base_offsetfset()
359 qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off) in qla4_82xx_pci_set_crbwindow_2M() argument
363 ha->crb_win = CRB_HI(*off); in qla4_82xx_pci_set_crbwindow_2M()
364 writel(ha->crb_win, in qla4_82xx_pci_set_crbwindow_2M()
365 (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); in qla4_82xx_pci_set_crbwindow_2M()
369 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); in qla4_82xx_pci_set_crbwindow_2M()
370 if (win_read != ha->crb_win) { in qla4_82xx_pci_set_crbwindow_2M()
[all …]
ql4_nvram.c
12 static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) in eeprom_cmd() argument
14 writel(cmd, isp_nvram(ha)); in eeprom_cmd()
15 readl(isp_nvram(ha)); in eeprom_cmd()
19 static inline int eeprom_size(struct scsi_qla_host *ha) in eeprom_size() argument
21 return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16; in eeprom_size()
24 static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) in eeprom_no_addr_bits() argument
26 return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 : in eeprom_no_addr_bits()
30 static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) in eeprom_no_data_bits() argument
35 static int fm93c56a_select(struct scsi_qla_host * ha) in fm93c56a_select() argument
39 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; in fm93c56a_select()
[all …]
ql4_mbx.c
14 void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd, in qla4xxx_queue_mbox_cmd() argument
21 writel(mbx_cmd[i], &ha->reg->mailbox[i]); in qla4xxx_queue_mbox_cmd()
24 writel(mbx_cmd[0], &ha->reg->mailbox[0]); in qla4xxx_queue_mbox_cmd()
25 readl(&ha->reg->mailbox[0]); in qla4xxx_queue_mbox_cmd()
26 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status); in qla4xxx_queue_mbox_cmd()
27 readl(&ha->reg->ctrl_status); in qla4xxx_queue_mbox_cmd()
30 void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count) in qla4xxx_process_mbox_intr() argument
34 intr_status = readl(&ha->reg->ctrl_status); in qla4xxx_process_mbox_intr()
41 ha->mbox_status_count = out_count; in qla4xxx_process_mbox_intr()
42 ha->isp_ops->interrupt_service_routine(ha, intr_status); in qla4xxx_process_mbox_intr()
[all …]
ql4_attr.c
16 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, in qla4_8xxx_sysfs_read_fw_dump() local
19 if (is_qla40XX(ha)) in qla4_8xxx_sysfs_read_fw_dump()
22 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) in qla4_8xxx_sysfs_read_fw_dump()
25 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, in qla4_8xxx_sysfs_read_fw_dump()
26 ha->fw_dump_size); in qla4_8xxx_sysfs_read_fw_dump()
34 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, in qla4_8xxx_sysfs_write_fw_dump() local
40 if (is_qla40XX(ha)) in qla4_8xxx_sysfs_write_fw_dump()
49 ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n", in qla4_8xxx_sysfs_write_fw_dump()
57 if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) { in qla4_8xxx_sysfs_write_fw_dump()
58 clear_bit(AF_82XX_FW_DUMPED, &ha->flags); in qla4_8xxx_sysfs_write_fw_dump()
[all …]
ql4_iocb.c
15 qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt) in qla4xxx_space_in_req_ring() argument
20 if ((req_cnt + 2) >= ha->req_q_count) { in qla4xxx_space_in_req_ring()
21 cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha); in qla4xxx_space_in_req_ring()
22 if (ha->request_in < cnt) in qla4xxx_space_in_req_ring()
23 ha->req_q_count = cnt - ha->request_in; in qla4xxx_space_in_req_ring()
25 ha->req_q_count = REQUEST_QUEUE_DEPTH - in qla4xxx_space_in_req_ring()
26 (ha->request_in - cnt); in qla4xxx_space_in_req_ring()
30 if ((req_cnt + 2) < ha->req_q_count) in qla4xxx_space_in_req_ring()
36 static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha) in qla4xxx_advance_req_ring_ptr() argument
39 if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) { in qla4xxx_advance_req_ring_ptr()
[all …]
ql4_os.c
99 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
103 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
298 static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) in qla4xxx_isp_check_reg() argument
303 if (is_qla8022(ha)) in qla4xxx_isp_check_reg()
304 reg_val = readl(&ha->qla4_82xx_reg->host_status); in qla4xxx_isp_check_reg()
305 else if (is_qla8032(ha) || is_qla8042(ha)) in qla4xxx_isp_check_reg()
306 reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); in qla4xxx_isp_check_reg()
308 reg_val = readw(&ha->reg->ctrl_status); in qla4xxx_isp_check_reg()
320 struct scsi_qla_host *ha = to_qla_host(shost); in qla4xxx_send_ping() local
333 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 " in qla4xxx_send_ping()
[all …]
ql4_bsg.c
15 struct scsi_qla_host *ha = to_qla_host(host); in qla4xxx_read_flash() local
26 if (unlikely(pci_channel_offline(ha->pdev))) in qla4xxx_read_flash()
29 if (ql4xxx_reset_active(ha)) { in qla4xxx_read_flash()
30 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); in qla4xxx_read_flash()
35 if (ha->flash_state != QLFLASH_WAITING) { in qla4xxx_read_flash()
36 ql4_printk(KERN_ERR, ha, "%s: another flash operation " in qla4xxx_read_flash()
42 ha->flash_state = QLFLASH_READING; in qla4xxx_read_flash()
46 flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, in qla4xxx_read_flash()
49 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " in qla4xxx_read_flash()
55 rval = qla4xxx_get_flash(ha, flash_dma, offset, length); in qla4xxx_read_flash()
[all …]
ql4_inline.h
21 qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index) in qla4xxx_lookup_ddb_by_fw_index() argument
26 (ha->fw_ddb_index_map[fw_ddb_index] != in qla4xxx_lookup_ddb_by_fw_index()
28 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; in qla4xxx_lookup_ddb_by_fw_index()
32 ha->host_no, __func__, fw_ddb_index, ddb_entry)); in qla4xxx_lookup_ddb_by_fw_index()
38 __qla4xxx_enable_intrs(struct scsi_qla_host *ha) in __qla4xxx_enable_intrs() argument
40 if (is_qla4022(ha) | is_qla4032(ha)) { in __qla4xxx_enable_intrs()
42 &ha->reg->u1.isp4022.intr_mask); in __qla4xxx_enable_intrs()
43 readl(&ha->reg->u1.isp4022.intr_mask); in __qla4xxx_enable_intrs()
45 writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status); in __qla4xxx_enable_intrs()
46 readl(&ha->reg->ctrl_status); in __qla4xxx_enable_intrs()
[all …]
ql4_dbg.c
32 void qla4xxx_dump_registers(struct scsi_qla_host *ha) in qla4xxx_dump_registers() argument
36 if (is_qla8022(ha)) { in qla4xxx_dump_registers()
39 i, readl(&ha->qla4_82xx_reg->mailbox_in[i])); in qla4xxx_dump_registers()
46 readw(&ha->reg->mailbox[i])); in qla4xxx_dump_registers()
51 readw(&ha->reg->flash_address)); in qla4xxx_dump_registers()
54 readw(&ha->reg->flash_data)); in qla4xxx_dump_registers()
57 readw(&ha->reg->ctrl_status)); in qla4xxx_dump_registers()
59 if (is_qla4010(ha)) { in qla4xxx_dump_registers()
62 readw(&ha->reg->u1.isp4010.nvram)); in qla4xxx_dump_registers()
63 } else if (is_qla4022(ha) | is_qla4032(ha)) { in qla4xxx_dump_registers()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/qla2xxx/
qla_sup.c
22 qla2x00_lock_nvram_access(struct qla_hw_data *ha) in qla2x00_lock_nvram_access() argument
25 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_lock_nvram_access()
27 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { in qla2x00_lock_nvram_access()
55 qla2x00_unlock_nvram_access(struct qla_hw_data *ha) in qla2x00_unlock_nvram_access() argument
57 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_unlock_nvram_access()
59 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { in qla2x00_unlock_nvram_access()
71 qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data) in qla2x00_nv_write() argument
73 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nv_write()
103 qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd) in qla2x00_nvram_request() argument
106 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_request()
[all …]
qla_nx.c
360 qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in, in qla82xx_pci_set_crbwindow_2M() argument
364 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); in qla82xx_pci_set_crbwindow_2M()
366 ha->crb_win = CRB_HI(off_in); in qla82xx_pci_set_crbwindow_2M()
367 writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase); in qla82xx_pci_set_crbwindow_2M()
372 win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase); in qla82xx_pci_set_crbwindow_2M()
373 if (win_read != ha->crb_win) { in qla82xx_pci_set_crbwindow_2M()
377 __func__, ha->crb_win, win_read, off_in); in qla82xx_pci_set_crbwindow_2M()
379 *off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; in qla82xx_pci_set_crbwindow_2M()
383 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, in qla82xx_pci_get_crb_addr_2M() argument
393 QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; in qla82xx_pci_get_crb_addr_2M()
[all …]
qla_os.c
384 struct qla_hw_data *ha = vha->hw; in qla_init_base_qpair() local
386 rsp->qpair = ha->base_qpair; in qla_init_base_qpair()
388 ha->base_qpair->hw = ha; in qla_init_base_qpair()
389 ha->base_qpair->req = req; in qla_init_base_qpair()
390 ha->base_qpair->rsp = rsp; in qla_init_base_qpair()
391 ha->base_qpair->vha = vha; in qla_init_base_qpair()
392 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock; in qla_init_base_qpair()
393 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0; in qla_init_base_qpair()
394 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q]; in qla_init_base_qpair()
395 ha->base_qpair->srb_mempool = ha->srb_mempool; in qla_init_base_qpair()
[all …]
qla_dfs.c
152 struct qla_hw_data *ha = vha->hw; in qla2x00_dfs_tgt_sess_show() local
161 spin_lock_irqsave(&ha->tgt.sess_lock, flags); in qla2x00_dfs_tgt_sess_show()
167 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); in qla2x00_dfs_tgt_sess_show()
179 struct qla_hw_data *ha = vha->hw; in qla2x00_dfs_tgt_port_database_show() local
188 gid_list = dma_alloc_coherent(&ha->pdev->dev, in qla2x00_dfs_tgt_port_database_show()
189 qla2x00_gid_list_size(ha), in qla2x00_dfs_tgt_port_database_show()
194 qla2x00_gid_list_size(ha)); in qla2x00_dfs_tgt_port_database_show()
220 id_iter += ha->gid_list_info_size; in qla2x00_dfs_tgt_port_database_show()
223 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), in qla2x00_dfs_tgt_port_database_show()
237 struct qla_hw_data *ha = vha->hw; in qla_dfs_fw_resource_cnt_show() local
[all …]
qla_init.c
81 struct qla_hw_data *ha = vha->hw; in qla2x00_get_async_timeout() local
84 tmo = ha->r_a_tov / 10 * 2; in qla2x00_get_async_timeout()
85 if (IS_QLAFX00(ha)) { in qla2x00_get_async_timeout()
87 } else if (!IS_FWI2_CAPABLE(ha)) { in qla2x00_get_async_timeout()
92 tmo = ha->login_timeout; in qla2x00_get_async_timeout()
622 struct qla_hw_data *ha = vha->hw; in qla2x00_is_reserved_id() local
624 if (IS_FWI2_CAPABLE(ha)) in qla2x00_is_reserved_id()
627 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || in qla2x00_is_reserved_id()
645 struct qla_hw_data *ha = vha->hw; in qla2x00_find_new_loop_id() local
650 spin_lock_irqsave(&ha->vport_slock, flags); in qla2x00_find_new_loop_id()
[all …]
qla_attr.c
25 struct qla_hw_data *ha = vha->hw; in qla2x00_sysfs_read_fw_dump() local
28 if (!(ha->fw_dump_reading || ha->mctp_dump_reading || in qla2x00_sysfs_read_fw_dump()
29 ha->mpi_fw_dump_reading)) in qla2x00_sysfs_read_fw_dump()
32 mutex_lock(&ha->optrom_mutex); in qla2x00_sysfs_read_fw_dump()
33 if (IS_P3P_TYPE(ha)) { in qla2x00_sysfs_read_fw_dump()
34 if (off < ha->md_template_size) { in qla2x00_sysfs_read_fw_dump()
36 &off, ha->md_tmplt_hdr, ha->md_template_size); in qla2x00_sysfs_read_fw_dump()
38 off -= ha->md_template_size; in qla2x00_sysfs_read_fw_dump()
40 &off, ha->md_dump, ha->md_dump_size); in qla2x00_sysfs_read_fw_dump()
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) { in qla2x00_sysfs_read_fw_dump()
[all …]
qla_mid.c
32 struct qla_hw_data *ha = vha->hw; in qla24xx_allocate_vp_id() local
36 mutex_lock(&ha->vport_lock); in qla24xx_allocate_vp_id()
37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); in qla24xx_allocate_vp_id()
38 if (vp_id > ha->max_npiv_vports) { in qla24xx_allocate_vp_id()
41 vp_id, ha->max_npiv_vports); in qla24xx_allocate_vp_id()
42 mutex_unlock(&ha->vport_lock); in qla24xx_allocate_vp_id()
46 set_bit(vp_id, ha->vp_idx_map); in qla24xx_allocate_vp_id()
47 ha->num_vhosts++; in qla24xx_allocate_vp_id()
50 spin_lock_irqsave(&ha->vport_slock, flags); in qla24xx_allocate_vp_id()
51 list_add_tail(&vha->list, &ha->vp_list); in qla24xx_allocate_vp_id()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/
ips.c
275 static void ips_free_flash_copperhead(ips_ha_t * ha);
326 static int ips_abort_init(ips_ha_t * ha, int index);
332 static int ips_poll_for_flush_complete(ips_ha_t * ha);
333 static void ips_flush_and_reset(ips_ha_t *ha);
578 ips_setup_funclist(ips_ha_t * ha) in ips_setup_funclist() argument
584 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { in ips_setup_funclist()
586 ha->func.isintr = ips_isintr_morpheus; in ips_setup_funclist()
587 ha->func.isinit = ips_isinit_morpheus; in ips_setup_funclist()
588 ha->func.issue = ips_issue_i2o_memio; in ips_setup_funclist()
589 ha->func.init = ips_init_morpheus; in ips_setup_funclist()
[all …]
gdth.c
119 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
121 static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
123 static int gdth_async_event(gdth_ha_str *ha);
126 static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
127 static void gdth_next(gdth_ha_str *ha);
128 static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
129 static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
130 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
132 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
133 static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
[all …]
qla1280.c
379 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020) argument
380 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \ argument
381 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
382 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \ argument
383 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
578 static int qla1280_read_nvram(struct scsi_qla_host *ha) in qla1280_read_nvram() argument
590 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no); in qla1280_read_nvram()
592 wptr = (uint16_t *)&ha->nvram; in qla1280_read_nvram()
593 nv = &ha->nvram; in qla1280_read_nvram()
596 *wptr = qla1280_get_nvram_word(ha, cnt); in qla1280_read_nvram()
[all …]
/kernel/linux/linux-5.10/net/core/
dev_addr_lists.c
24 struct netdev_hw_addr *ha; in __hw_addr_create_ex() local
27 alloc_size = sizeof(*ha); in __hw_addr_create_ex()
30 ha = kmalloc(alloc_size, GFP_ATOMIC); in __hw_addr_create_ex()
31 if (!ha) in __hw_addr_create_ex()
33 memcpy(ha->addr, addr, addr_len); in __hw_addr_create_ex()
34 ha->type = addr_type; in __hw_addr_create_ex()
35 ha->refcount = 1; in __hw_addr_create_ex()
36 ha->global_use = global; in __hw_addr_create_ex()
37 ha->synced = sync ? 1 : 0; in __hw_addr_create_ex()
38 ha->sync_cnt = 0; in __hw_addr_create_ex()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/libsas/
sas_event.c
13 int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) in sas_queue_work() argument
18 if (!test_bit(SAS_HA_REGISTERED, &ha->state)) in sas_queue_work()
21 if (test_bit(SAS_HA_DRAINING, &ha->state)) { in sas_queue_work()
24 list_add_tail(&sw->drain_node, &ha->defer_q); in sas_queue_work()
26 rc = queue_work(ha->event_q, &sw->work); in sas_queue_work()
32 struct sas_ha_struct *ha) in sas_queue_event() argument
37 spin_lock_irqsave(&ha->lock, flags); in sas_queue_event()
38 rc = sas_queue_work(ha, work); in sas_queue_event()
39 spin_unlock_irqrestore(&ha->lock, flags); in sas_queue_event()
45 void __sas_drain_work(struct sas_ha_struct *ha) in __sas_drain_work() argument
[all …]
