Searched refs:ha (Results 1 – 25 of 61) sorted by relevance

/drivers/scsi/qla4xxx/
ql4_os.c
54 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
130 struct scsi_qla_host *ha = ddb_entry->ha; in qla4xxx_recovery_timedout() local
137 ha->host_no, __func__, ddb_entry->fw_ddb_index, in qla4xxx_recovery_timedout()
138 ha->port_down_retry_count)); in qla4xxx_recovery_timedout()
142 ha->host_no, __func__, ha->dpc_flags)); in qla4xxx_recovery_timedout()
143 queue_work(ha->dpc_thread, &ha->dpc_work); in qla4xxx_recovery_timedout()
150 struct scsi_qla_host *ha = to_qla_host(shost); in qla4xxx_host_get_param() local
155 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); in qla4xxx_host_get_param()
158 len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0], in qla4xxx_host_get_param()
159 ha->ip_address[1], ha->ip_address[2], in qla4xxx_host_get_param()
[all …]
ql4_init.c
14 static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
17 static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) in ql4xxx_set_mac_number() argument
24 spin_lock_irqsave(&ha->hardware_lock, flags); in ql4xxx_set_mac_number()
25 value = readw(&ha->reg->ctrl_status); in ql4xxx_set_mac_number()
26 spin_unlock_irqrestore(&ha->hardware_lock, flags); in ql4xxx_set_mac_number()
31 ha->mac_index = 1; in ql4xxx_set_mac_number()
34 ha->mac_index = 3; in ql4xxx_set_mac_number()
38 "ispControlStatus = 0x%x\n", ha->host_no, in ql4xxx_set_mac_number()
42 DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__, in ql4xxx_set_mac_number()
43 ha->mac_index)); in ql4xxx_set_mac_number()
[all …]
ql4_isr.c
18 static void qla4xxx_status_entry(struct scsi_qla_host *ha, in qla4xxx_status_entry() argument
28 srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle)); in qla4xxx_status_entry()
33 "been completed.\n", ha->host_no, __func__, in qla4xxx_status_entry()
35 dev_warn(&ha->pdev->dev, "%s invalid status entry:" in qla4xxx_status_entry()
37 set_bit(DPC_RESET_HA, &ha->dpc_flags); in qla4xxx_status_entry()
45 ha->host_no, __func__, sts_entry->handle, in qla4xxx_status_entry()
47 dev_warn(&ha->pdev->dev, "Command is NULL:" in qla4xxx_status_entry()
80 "residual = 0x%x\n", ha->host_no, in qla4xxx_status_entry()
105 "ASC/ASCQ = %02x/%02x\n", ha->host_no, in qla4xxx_status_entry()
123 ha->host_no, cmd->device->channel, in qla4xxx_status_entry()
[all …]
ql4_nvram.c
13 static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) in eeprom_cmd() argument
15 writel(cmd, isp_nvram(ha)); in eeprom_cmd()
16 readl(isp_nvram(ha)); in eeprom_cmd()
20 static inline int eeprom_size(struct scsi_qla_host *ha) in eeprom_size() argument
22 return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16; in eeprom_size()
25 static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha) in eeprom_no_addr_bits() argument
27 return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 : in eeprom_no_addr_bits()
31 static inline int eeprom_no_data_bits(struct scsi_qla_host *ha) in eeprom_no_data_bits() argument
36 static int fm93c56a_select(struct scsi_qla_host * ha) in fm93c56a_select() argument
40 ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000; in fm93c56a_select()
[all …]
ql4_mbx.c
26 static int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, in qla4xxx_mailbox_command() argument
39 "pointer\n", ha->host_no, __func__)); in qla4xxx_mailbox_command()
46 mutex_lock(&ha->mbox_sem); in qla4xxx_mailbox_command()
47 if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) { in qla4xxx_mailbox_command()
48 set_bit(AF_MBOX_COMMAND, &ha->flags); in qla4xxx_mailbox_command()
49 mutex_unlock(&ha->mbox_sem); in qla4xxx_mailbox_command()
52 mutex_unlock(&ha->mbox_sem); in qla4xxx_mailbox_command()
55 ha->host_no, __func__)); in qla4xxx_mailbox_command()
66 spin_lock_irqsave(&ha->hardware_lock, flags); in qla4xxx_mailbox_command()
67 intr_status = readl(&ha->reg->ctrl_status); in qla4xxx_mailbox_command()
[all …]
ql4_iocb.c
26 static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha, in qla4xxx_get_req_pkt() argument
32 *queue_entry = ha->request_ptr; in qla4xxx_get_req_pkt()
35 request_in = ha->request_in; in qla4xxx_get_req_pkt()
36 ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out); in qla4xxx_get_req_pkt()
41 ha->request_ptr = ha->request_ring; in qla4xxx_get_req_pkt()
44 ha->request_ptr++; in qla4xxx_get_req_pkt()
48 if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) { in qla4xxx_get_req_pkt()
50 ha->request_ptr = *queue_entry; in qla4xxx_get_req_pkt()
53 ha->request_in = request_in; in qla4xxx_get_req_pkt()
69 int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha, in qla4xxx_send_marker_iocb() argument
[all …]
ql4_inline.h
22 qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index) in qla4xxx_lookup_ddb_by_fw_index() argument
27 (ha->fw_ddb_index_map[fw_ddb_index] != in qla4xxx_lookup_ddb_by_fw_index()
29 ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; in qla4xxx_lookup_ddb_by_fw_index()
33 ha->host_no, __func__, fw_ddb_index, ddb_entry)); in qla4xxx_lookup_ddb_by_fw_index()
39 __qla4xxx_enable_intrs(struct scsi_qla_host *ha) in __qla4xxx_enable_intrs() argument
41 if (is_qla4022(ha) | is_qla4032(ha)) { in __qla4xxx_enable_intrs()
43 &ha->reg->u1.isp4022.intr_mask); in __qla4xxx_enable_intrs()
44 readl(&ha->reg->u1.isp4022.intr_mask); in __qla4xxx_enable_intrs()
46 writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status); in __qla4xxx_enable_intrs()
47 readl(&ha->reg->ctrl_status); in __qla4xxx_enable_intrs()
[all …]
ql4_glbl.h
13 void qla4xxx_hw_reset(struct scsi_qla_host *ha);
15 int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
16 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
17 int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
19 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
22 void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
23 void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
25 int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
26 int qla4xxx_relogin_device(struct scsi_qla_host * ha,
28 int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
[all …]
ql4_def.h
157 struct scsi_qla_host *ha; /* HA the SP is queued on */ member
206 struct scsi_qla_host *ha; member
441 static inline int is_qla4010(struct scsi_qla_host *ha) in is_qla4010() argument
443 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010; in is_qla4010()
446 static inline int is_qla4022(struct scsi_qla_host *ha) in is_qla4022() argument
448 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022; in is_qla4022()
451 static inline int is_qla4032(struct scsi_qla_host *ha) in is_qla4032() argument
453 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032; in is_qla4032()
456 static inline int adapter_up(struct scsi_qla_host *ha) in adapter_up() argument
458 return (test_bit(AF_ONLINE, &ha->flags) != 0) && in adapter_up()
[all …]
/drivers/scsi/qla2xxx/
qla_sup.c
22 qla2x00_lock_nvram_access(struct qla_hw_data *ha) in qla2x00_lock_nvram_access() argument
25 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_lock_nvram_access()
27 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { in qla2x00_lock_nvram_access()
55 qla2x00_unlock_nvram_access(struct qla_hw_data *ha) in qla2x00_unlock_nvram_access() argument
57 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_unlock_nvram_access()
59 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { in qla2x00_unlock_nvram_access()
71 qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data) in qla2x00_nv_write() argument
73 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nv_write()
103 qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd) in qla2x00_nvram_request() argument
106 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; in qla2x00_nvram_request()
[all …]
qla_os.c
222 static int qla2x00_alloc_queues(struct qla_hw_data *ha) in qla2x00_alloc_queues() argument
224 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, in qla2x00_alloc_queues()
226 if (!ha->req_q_map) { in qla2x00_alloc_queues()
227 qla_printk(KERN_WARNING, ha, in qla2x00_alloc_queues()
232 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, in qla2x00_alloc_queues()
234 if (!ha->rsp_q_map) { in qla2x00_alloc_queues()
235 qla_printk(KERN_WARNING, ha, in qla2x00_alloc_queues()
239 set_bit(0, ha->rsp_qid_map); in qla2x00_alloc_queues()
240 set_bit(0, ha->req_qid_map); in qla2x00_alloc_queues()
244 kfree(ha->req_q_map); in qla2x00_alloc_queues()
[all …]
qla_dfs.c
22 struct qla_hw_data *ha = vha->hw; in qla2x00_dfs_fce_show() local
24 mutex_lock(&ha->fce_mutex); in qla2x00_dfs_fce_show()
27 seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); in qla2x00_dfs_fce_show()
28 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); in qla2x00_dfs_fce_show()
31 ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], in qla2x00_dfs_fce_show()
32 ha->fce_mb[5], ha->fce_mb[6]); in qla2x00_dfs_fce_show()
34 fce = (uint32_t *) ha->fce; in qla2x00_dfs_fce_show()
35 fce_start = (unsigned long long) ha->fce_dma; in qla2x00_dfs_fce_show()
36 for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { in qla2x00_dfs_fce_show()
47 mutex_unlock(&ha->fce_mutex); in qla2x00_dfs_fce_show()
[all …]
qla_init.c
62 struct qla_hw_data *ha = vha->hw; in qla2x00_initialize_adapter() local
63 struct req_que *req = ha->req_q_map[0]; in qla2x00_initialize_adapter()
73 ha->mbx_flags = 0; in qla2x00_initialize_adapter()
74 ha->isp_abort_cnt = 0; in qla2x00_initialize_adapter()
75 ha->beacon_blink_led = 0; in qla2x00_initialize_adapter()
78 set_bit(0, ha->req_qid_map); in qla2x00_initialize_adapter()
79 set_bit(0, ha->rsp_qid_map); in qla2x00_initialize_adapter()
81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); in qla2x00_initialize_adapter()
82 rval = ha->isp_ops->pci_config(vha); in qla2x00_initialize_adapter()
89 ha->isp_ops->reset_chip(vha); in qla2x00_initialize_adapter()
[all …]
qla_attr.c
24 struct qla_hw_data *ha = vha->hw; in qla2x00_sysfs_read_fw_dump() local
26 if (ha->fw_dump_reading == 0) in qla2x00_sysfs_read_fw_dump()
29 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, in qla2x00_sysfs_read_fw_dump()
30 ha->fw_dump_len); in qla2x00_sysfs_read_fw_dump()
40 struct qla_hw_data *ha = vha->hw; in qla2x00_sysfs_write_fw_dump() local
49 if (!ha->fw_dump_reading) in qla2x00_sysfs_write_fw_dump()
52 qla_printk(KERN_INFO, ha, in qla2x00_sysfs_write_fw_dump()
55 ha->fw_dump_reading = 0; in qla2x00_sysfs_write_fw_dump()
56 ha->fw_dumped = 0; in qla2x00_sysfs_write_fw_dump()
59 if (ha->fw_dumped && !ha->fw_dump_reading) { in qla2x00_sysfs_write_fw_dump()
[all …]
qla_mid.c
32 struct qla_hw_data *ha = vha->hw; in qla24xx_allocate_vp_id() local
35 mutex_lock(&ha->vport_lock); in qla24xx_allocate_vp_id()
36 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); in qla24xx_allocate_vp_id()
37 if (vp_id > ha->max_npiv_vports) { in qla24xx_allocate_vp_id()
39 vp_id, ha->max_npiv_vports)); in qla24xx_allocate_vp_id()
40 mutex_unlock(&ha->vport_lock); in qla24xx_allocate_vp_id()
44 set_bit(vp_id, ha->vp_idx_map); in qla24xx_allocate_vp_id()
45 ha->num_vhosts++; in qla24xx_allocate_vp_id()
46 ha->cur_vport_count++; in qla24xx_allocate_vp_id()
48 list_add_tail(&vha->list, &ha->vp_list); in qla24xx_allocate_vp_id()
[all …]
qla_isr.c
34 struct qla_hw_data *ha; in qla2100_intr_handler() local
49 ha = rsp->hw; in qla2100_intr_handler()
50 reg = &ha->iobase->isp; in qla2100_intr_handler()
53 spin_lock(&ha->hardware_lock); in qla2100_intr_handler()
58 if (pci_channel_offline(ha->pdev)) in qla2100_intr_handler()
69 ha->isp_ops->fw_dump(vha, 1); in qla2100_intr_handler()
80 mb[0] = RD_MAILBOX_REG(ha, reg, 0); in qla2100_intr_handler()
85 mb[1] = RD_MAILBOX_REG(ha, reg, 1); in qla2100_intr_handler()
86 mb[2] = RD_MAILBOX_REG(ha, reg, 2); in qla2100_intr_handler()
87 mb[3] = RD_MAILBOX_REG(ha, reg, 3); in qla2100_intr_handler()
[all …]
qla_dbg.c
12 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) in qla2xxx_prep_dump() argument
14 fw_dump->fw_major_version = htonl(ha->fw_major_version); in qla2xxx_prep_dump()
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version); in qla2xxx_prep_dump()
16 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version); in qla2xxx_prep_dump()
17 fw_dump->fw_attributes = htonl(ha->fw_attributes); in qla2xxx_prep_dump()
19 fw_dump->vendor = htonl(ha->pdev->vendor); in qla2xxx_prep_dump()
20 fw_dump->device = htonl(ha->pdev->device); in qla2xxx_prep_dump()
21 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor); in qla2xxx_prep_dump()
22 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device); in qla2xxx_prep_dump()
26 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) in qla2xxx_copy_queues() argument
[all …]
qla_gs.c
27 struct qla_hw_data *ha = vha->hw; in qla2x00_prep_ms_iocb() local
30 ms_pkt = ha->ms_iocb; in qla2x00_prep_ms_iocb()
35 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); in qla2x00_prep_ms_iocb()
37 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); in qla2x00_prep_ms_iocb()
43 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); in qla2x00_prep_ms_iocb()
44 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); in qla2x00_prep_ms_iocb()
47 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); in qla2x00_prep_ms_iocb()
48 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); in qla2x00_prep_ms_iocb()
65 struct qla_hw_data *ha = vha->hw; in qla24xx_prep_ms_iocb() local
68 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; in qla24xx_prep_ms_iocb()
[all …]
qla_iocb.c
291 struct qla_hw_data *ha; in qla2x00_start_scsi() local
298 ha = vha->hw; in qla2x00_start_scsi()
299 reg = &ha->iobase->isp; in qla2x00_start_scsi()
301 req = ha->req_q_map[0]; in qla2x00_start_scsi()
302 rsp = ha->rsp_q_map[0]; in qla2x00_start_scsi()
315 spin_lock_irqsave(&ha->hardware_lock, flags); in qla2x00_start_scsi()
331 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), in qla2x00_start_scsi()
341 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); in qla2x00_start_scsi()
343 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); in qla2x00_start_scsi()
368 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); in qla2x00_start_scsi()
[all …]
qla_def.h
387 #define ISP_REQ_Q_IN(ha, reg) \ argument
388 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
391 #define ISP_REQ_Q_OUT(ha, reg) \ argument
392 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
395 #define ISP_RSP_Q_IN(ha, reg) \ argument
396 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
399 #define ISP_RSP_Q_OUT(ha, reg) \ argument
400 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
404 #define MAILBOX_REG(ha, reg, num) \ argument
405 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
[all …]
/drivers/scsi/
ips.c
281 static void ips_free_flash_copperhead(ips_ha_t * ha);
333 static int ips_abort_init(ips_ha_t * ha, int index);
339 static int ips_poll_for_flush_complete(ips_ha_t * ha);
340 static void ips_flush_and_reset(ips_ha_t *ha);
586 ips_setup_funclist(ips_ha_t * ha) in ips_setup_funclist() argument
592 if (IPS_IS_MORPHEUS(ha) || IPS_IS_MARCO(ha)) { in ips_setup_funclist()
594 ha->func.isintr = ips_isintr_morpheus; in ips_setup_funclist()
595 ha->func.isinit = ips_isinit_morpheus; in ips_setup_funclist()
596 ha->func.issue = ips_issue_i2o_memio; in ips_setup_funclist()
597 ha->func.init = ips_init_morpheus; in ips_setup_funclist()
[all …]
gdth.c
145 static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
147 static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index,
149 static int gdth_async_event(gdth_ha_str *ha);
152 static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority);
153 static void gdth_next(gdth_ha_str *ha);
154 static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b);
155 static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
156 static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
158 static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
159 static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
[all …]
qla1280.c
414 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020) argument
415 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \ argument
416 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
417 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \ argument
418 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
608 static int qla1280_read_nvram(struct scsi_qla_host *ha) in qla1280_read_nvram() argument
620 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no); in qla1280_read_nvram()
622 wptr = (uint16_t *)&ha->nvram; in qla1280_read_nvram()
623 nv = &ha->nvram; in qla1280_read_nvram()
626 *wptr = qla1280_get_nvram_word(ha, cnt); in qla1280_read_nvram()
[all …]
eata.c
1087 struct hostdata *ha; in port_detect() local
1287 ha = (struct hostdata *)shost->hostdata; in port_detect()
1289 memset(ha, 0, sizeof(struct hostdata)); in port_detect()
1290 ha->subversion = subversion; in port_detect()
1291 ha->protocol_rev = protocol_rev; in port_detect()
1292 ha->is_pci = is_pci; in port_detect()
1293 ha->pdev = pdev; in port_detect()
1294 ha->board_number = j; in port_detect()
1296 if (ha->subversion == ESA) in port_detect()
1311 strcpy(ha->board_name, name); in port_detect()
[all …]
gdth_proc.c
10 gdth_ha_str *ha = shost_priv(host); in gdth_proc_info() local
16 return(gdth_set_info(buffer,length,host,ha)); in gdth_proc_info()
18 return(gdth_get_info(buffer,start,offset,length,host,ha)); in gdth_proc_info()
22 gdth_ha_str *ha) in gdth_set_info() argument
26 TRACE2(("gdth_set_info() ha %d\n",ha->hanum,)); in gdth_set_info()
32 ret_val = gdth_set_asc_info(host, buffer, length, ha); in gdth_set_info()
40 int length, gdth_ha_str *ha) in gdth_set_asc_info() argument
52 TRACE2(("gdth_set_asc_info() ha %d\n",ha->hanum)); in gdth_set_asc_info()
73 if (ha->hdr[i].present) { in gdth_set_asc_info()
79 if (ha->cache_feat & GDT_64BIT) { in gdth_set_asc_info()
[all …]
