
Lines Matching "use-advanced-sector-protection"

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ipr.c -- driver for IBM Power Linux RAID adapters
17 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18 * PCI-X Dual Channel Ultra 320 SCSI Adapter
19 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
23 * - Ultra 320 SCSI controller
24 * - PCI-X host interface
25 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26 * - Non-Volatile Write Cache
27 * - Supports attachment of non-RAID disks, tape, and optical devices
28 * - RAID Levels 0, 5, 10
29 * - Hot spare
30 * - Background Parity Checking
31 * - Background Data Scrubbing
32 * - Ability to increase the capacity of an existing RAID 5 disk array
36 * - Tagged command queuing
37 * - Adapter microcode download
38 * - PCI hot plug
39 * - SCSI device hot plug
93 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
194 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2…
196 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
211 …ESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (…
247 "FFF9: Device sector reassign successful"},
251 "7001: IOA sector reassignment successful"},
253 "FFF9: Soft media error. Sector reassignment recommended"},
379 "9073: Invalid multi-adapter configuration"},
401 "Illegal request, command not allowed to a non-optimized resource"},
417 "9031: Array protection temporarily suspended, protection resuming"},
419 "9040: Array protection temporarily suspended, protection resuming"},
451 "9074: Asymmetric advanced function disk configuration"},
473 "9041: Array protection temporarily suspended"},
525 "9092: Disk unit requires initialization before use"},
551 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
552 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
557 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
558 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
578 * ipr_trc_hook - Add a trace entry to the driver trace
590 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_trc_hook()
593 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK; in ipr_trc_hook()
594 trace_entry = &ioa_cfg->trace[trace_index]; in ipr_trc_hook()
595 trace_entry->time = jiffies; in ipr_trc_hook()
596 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; in ipr_trc_hook()
597 trace_entry->type = type; in ipr_trc_hook()
598 if (ipr_cmd->ioa_cfg->sis64) in ipr_trc_hook()
599 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; in ipr_trc_hook()
601 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; in ipr_trc_hook()
602 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; in ipr_trc_hook()
603 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; in ipr_trc_hook()
604 trace_entry->u.add_data = add_data; in ipr_trc_hook()
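ipr_trc_hook() claims trace slots without a lock: atomic_add_return() hands each caller a unique counter value, and the power-of-two mask IPR_TRACE_INDEX_MASK wraps it into the fixed-size ring. A minimal sketch of the same technique in plain C11 atomics, with hypothetical names:

    #include <stdatomic.h>

    #define TRACE_ENTRIES 8192                      /* must be a power of two */
    #define TRACE_INDEX_MASK (TRACE_ENTRIES - 1)

    struct trace_entry { unsigned long time; unsigned char op_code; };

    static struct trace_entry trace[TRACE_ENTRIES];
    static atomic_uint trace_index;

    /* Claim the next slot: concurrent callers get distinct slots, and
     * old entries are silently overwritten once the ring wraps. */
    static struct trace_entry *trace_next_slot(void)
    {
            unsigned int idx = atomic_fetch_add(&trace_index, 1) + 1;

            return &trace[idx & TRACE_INDEX_MASK];
    }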
612 * ipr_lock_and_done - Acquire lock and complete command
621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_lock_and_done()
623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
624 ipr_cmd->done(ipr_cmd); in ipr_lock_and_done()
625 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_lock_and_done()
629 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
637 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd()
638 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd()
639 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_reinit_ipr_cmnd()
640 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd()
643 hrrq_id = ioarcb->cmd_pkt.hrrq_id; in ipr_reinit_ipr_cmnd()
644 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd()
645 ioarcb->cmd_pkt.hrrq_id = hrrq_id; in ipr_reinit_ipr_cmnd()
646 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
647 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd()
648 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd()
649 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd()
651 if (ipr_cmd->ioa_cfg->sis64) { in ipr_reinit_ipr_cmnd()
652 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd()
654 ioasa64->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
656 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd()
658 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd()
659 ioasa->u.gata.status = 0; in ipr_reinit_ipr_cmnd()
662 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd()
663 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd()
664 ipr_cmd->scsi_cmd = NULL; in ipr_reinit_ipr_cmnd()
665 ipr_cmd->qc = NULL; in ipr_reinit_ipr_cmnd()
666 ipr_cmd->sense_buffer[0] = 0; in ipr_reinit_ipr_cmnd()
667 ipr_cmd->dma_use_sg = 0; in ipr_reinit_ipr_cmnd()
671 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
673 * @fast_done: fast done function call-back
682 ipr_cmd->u.scratch = 0; in ipr_init_ipr_cmnd()
683 ipr_cmd->sibling = NULL; in ipr_init_ipr_cmnd()
684 ipr_cmd->eh_comp = NULL; in ipr_init_ipr_cmnd()
685 ipr_cmd->fast_done = fast_done; in ipr_init_ipr_cmnd()
686 timer_setup(&ipr_cmd->timer, NULL, 0); in ipr_init_ipr_cmnd()
690 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
701 if (likely(!list_empty(&hrrq->hrrq_free_q))) { in __ipr_get_free_ipr_cmnd()
702 ipr_cmd = list_entry(hrrq->hrrq_free_q.next, in __ipr_get_free_ipr_cmnd()
704 list_del(&ipr_cmd->queue); in __ipr_get_free_ipr_cmnd()
712 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
722 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]); in ipr_get_free_ipr_cmnd()
728 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
744 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_mask_and_clear_interrupts()
745 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
746 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_mask_and_clear_interrupts()
747 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_mask_and_clear_interrupts()
751 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
752 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
754 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_mask_and_clear_interrupts()
757 if (ioa_cfg->sis64) in ipr_mask_and_clear_interrupts()
758 writel(~0, ioa_cfg->regs.clr_interrupt_reg); in ipr_mask_and_clear_interrupts()
759 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); in ipr_mask_and_clear_interrupts()
760 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_mask_and_clear_interrupts()
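The trailing readl() of sense_interrupt_reg exists only to flush posted MMIO writes: PCI bridges may buffer writel()/writeq(), and a read from the same device forces them to complete before the function returns. A hedged sketch of the idiom (register offsets are invented):

    #include <linux/io.h>

    /* Illustrative offsets only: mask device interrupts, then read a
     * register back so the posted write reaches the adapter before we
     * return (MMIO writes may otherwise sit in bridge buffers). */
    static void chip_mask_irqs(void __iomem *regs)
    {
            writel(~0, regs + 0x20);        /* hypothetical mask register */
            readl(regs + 0x24);             /* read-back flushes the post */
    }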
764 * ipr_save_pcix_cmd_reg - Save PCI-X command register
768 * 0 on success / -EIO on failure
772 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_save_pcix_cmd_reg()
777 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_save_pcix_cmd_reg()
778 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_save_pcix_cmd_reg()
779 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); in ipr_save_pcix_cmd_reg()
780 return -EIO; in ipr_save_pcix_cmd_reg()
783 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; in ipr_save_pcix_cmd_reg()
788 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
792 * 0 on success / -EIO on failure
796 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); in ipr_set_pcix_cmd_reg()
799 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD, in ipr_set_pcix_cmd_reg()
800 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { in ipr_set_pcix_cmd_reg()
801 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); in ipr_set_pcix_cmd_reg()
802 return -EIO; in ipr_set_pcix_cmd_reg()
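ipr_save_pcix_cmd_reg() and ipr_set_pcix_cmd_reg() form a save/restore pair around adapter reset: the PCI-X command word is read via the capability located by pci_find_capability(), stashed (with data-parity recovery and relaxed ordering forced on), and written back afterwards. A minimal sketch of that call pattern, names illustrative:

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Sketch: preserve the PCI-X command word across an adapter reset. */
    static int reset_with_pcix_preserved(struct pci_dev *pdev, u16 *saved)
    {
            int cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);

            if (!cap)
                    return 0;               /* not a PCI-X function */
            if (pci_read_config_word(pdev, cap + PCI_X_CMD, saved))
                    return -EIO;
            /* ... issue the adapter reset here ... */
            if (pci_write_config_word(pdev, cap + PCI_X_CMD, *saved))
                    return -EIO;
            return 0;
    }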
810 * __ipr_sata_eh_done - done function for aborted SATA commands
821 struct ata_queued_cmd *qc = ipr_cmd->qc; in __ipr_sata_eh_done()
822 struct ipr_sata_port *sata_port = qc->ap->private_data; in __ipr_sata_eh_done()
824 qc->err_mask |= AC_ERR_OTHER; in __ipr_sata_eh_done()
825 sata_port->ioasa.status |= ATA_BUSY; in __ipr_sata_eh_done()
827 if (ipr_cmd->eh_comp) in __ipr_sata_eh_done()
828 complete(ipr_cmd->eh_comp); in __ipr_sata_eh_done()
829 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_sata_eh_done()
833 * ipr_sata_eh_done - done function for aborted SATA commands
844 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_sata_eh_done()
847 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
849 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_sata_eh_done()
853 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
857 * ops generated by the SCSI mid-layer which are being aborted.
864 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_scsi_eh_done()
866 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_scsi_eh_done()
868 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_scsi_eh_done()
869 scsi_cmd->scsi_done(scsi_cmd); in __ipr_scsi_eh_done()
870 if (ipr_cmd->eh_comp) in __ipr_scsi_eh_done()
871 complete(ipr_cmd->eh_comp); in __ipr_scsi_eh_done()
872 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_scsi_eh_done()
876 * ipr_scsi_eh_done - mid-layer done function for aborted ops
880 * ops generated by the SCSI mid-layer which are being aborted.
888 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_scsi_eh_done()
890 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
892 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_scsi_eh_done()
896 * ipr_fail_all_ops - Fails all outstanding ops.
911 spin_lock(&hrrq->_lock); in ipr_fail_all_ops()
913 temp, &hrrq->hrrq_pending_q, queue) { in ipr_fail_all_ops()
914 list_del(&ipr_cmd->queue); in ipr_fail_all_ops()
916 ipr_cmd->s.ioasa.hdr.ioasc = in ipr_fail_all_ops()
918 ipr_cmd->s.ioasa.hdr.ilid = in ipr_fail_all_ops()
921 if (ipr_cmd->scsi_cmd) in ipr_fail_all_ops()
922 ipr_cmd->done = __ipr_scsi_eh_done; in ipr_fail_all_ops()
923 else if (ipr_cmd->qc) in ipr_fail_all_ops()
924 ipr_cmd->done = __ipr_sata_eh_done; in ipr_fail_all_ops()
928 del_timer(&ipr_cmd->timer); in ipr_fail_all_ops()
929 ipr_cmd->done(ipr_cmd); in ipr_fail_all_ops()
931 spin_unlock(&hrrq->_lock); in ipr_fail_all_ops()
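ipr_fail_all_ops() iterates hrrq_pending_q with list_for_each_entry_safe() because each pass unlinks the current node; the _safe variant caches the next pointer in a temporary so deletion cannot break the walk. A self-contained sketch of the idiom with made-up types:

    #include <linux/list.h>

    struct op { struct list_head queue; int ioasc; };

    /* Fail every pending op: the _safe iterator caches the next node,
     * so unlinking the current one cannot corrupt the walk. */
    static void fail_all(struct list_head *pending, struct list_head *free)
    {
            struct op *op, *tmp;

            list_for_each_entry_safe(op, tmp, pending, queue) {
                    list_del(&op->queue);
                    op->ioasc = -1;                 /* mark as failed */
                    list_add_tail(&op->queue, free);
            }
    }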
937 * ipr_send_command - Send driver initiated requests.
949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_command()
950 dma_addr_t send_dma_addr = ipr_cmd->dma_addr; in ipr_send_command()
952 if (ioa_cfg->sis64) { in ipr_send_command()
957 then use a 512 byte ioarcb */ in ipr_send_command()
958 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) in ipr_send_command()
960 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
962 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); in ipr_send_command()
966 * ipr_do_req - Send driver initiated requests.
982 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_do_req()
984 ipr_cmd->done = done; in ipr_do_req()
986 ipr_cmd->timer.expires = jiffies + timeout; in ipr_do_req()
987 ipr_cmd->timer.function = timeout_func; in ipr_do_req()
989 add_timer(&ipr_cmd->timer); in ipr_do_req()
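ipr_do_req() arms a per-command timeout: timer_setup() already ran once in ipr_init_ipr_cmnd(), so only .expires and .function are filled in before add_timer(). A sketch of the same arm-and-fire shape (struct and callback names are invented):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct cmd { struct timer_list timer; };

    static void cmd_timeout(struct timer_list *t)
    {
            struct cmd *cmd = from_timer(cmd, t, timer);
            /* ... abort or retry @cmd ... */
    }

    /* Assumes timer_setup(&cmd->timer, NULL, 0) ran at init, as in
     * ipr_init_ipr_cmnd() above; here we only arm the one-shot. */
    static void cmd_start(struct cmd *cmd, unsigned long timeout)
    {
            cmd->timer.expires = jiffies + timeout;
            cmd->timer.function = cmd_timeout;
            add_timer(&cmd->timer);
    }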
997 * ipr_internal_cmd_done - Op done function for an internally generated op.
1008 if (ipr_cmd->sibling) in ipr_internal_cmd_done()
1009 ipr_cmd->sibling = NULL; in ipr_internal_cmd_done()
1011 complete(&ipr_cmd->completion); in ipr_internal_cmd_done()
1015 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_init_ioadl()
1031 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_init_ioadl()
1033 ipr_cmd->dma_use_sg = 1; in ipr_init_ioadl()
1035 if (ipr_cmd->ioa_cfg->sis64) { in ipr_init_ioadl()
1036 ioadl64->flags = cpu_to_be32(flags); in ipr_init_ioadl()
1037 ioadl64->data_len = cpu_to_be32(len); in ipr_init_ioadl()
1038 ioadl64->address = cpu_to_be64(dma_addr); in ipr_init_ioadl()
1040 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1042 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1044 ioadl->flags_and_data_len = cpu_to_be32(flags | len); in ipr_init_ioadl()
1045 ioadl->address = cpu_to_be32(dma_addr); in ipr_init_ioadl()
1048 ipr_cmd->ioarcb.read_ioadl_len = in ipr_init_ioadl()
1050 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
1052 ipr_cmd->ioarcb.ioadl_len = in ipr_init_ioadl()
1054 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); in ipr_init_ioadl()
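Every field headed for the adapter passes through cpu_to_be32()/cpu_to_be64() because the IOA parses its descriptors big-endian regardless of host byte order. A small hedged sketch of building such a descriptor, mirroring the ioadl64 shape above with assumed field names:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical single-element scatter descriptor laid out in the
     * device's (big-endian) byte order, mirroring ioadl64 above. */
    struct sg_desc64 {
            __be32 flags;
            __be32 data_len;
            __be64 address;
    };

    static void fill_desc(struct sg_desc64 *d, u32 flags, u32 len, u64 dma)
    {
            d->flags    = cpu_to_be32(flags);
            d->data_len = cpu_to_be32(len);
            d->address  = cpu_to_be64(dma); /* no-op on big-endian hosts */
    }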
1060 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1072 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_send_blocking_cmd()
1074 init_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1077 spin_unlock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
1078 wait_for_completion(&ipr_cmd->completion); in ipr_send_blocking_cmd()
1079 spin_lock_irq(ioa_cfg->host->host_lock); in ipr_send_blocking_cmd()
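ipr_send_blocking_cmd() is the standard sleep-until-done shape: init_completion(), issue, drop the host lock so the interrupt path can run and call complete(), wait_for_completion(), retake the lock. A condensed sketch with placeholder types:

    #include <linux/completion.h>
    #include <linux/spinlock.h>

    struct cmd { struct completion done; };

    /* Caller holds @lock; release it while sleeping so the interrupt
     * handler that eventually calls complete(&cmd->done) can take it. */
    static void send_and_wait(struct cmd *cmd, spinlock_t *lock)
    {
            init_completion(&cmd->done);
            /* ... hand @cmd to the hardware here ... */
            spin_unlock_irq(lock);
            wait_for_completion(&cmd->done);
            spin_lock_irq(lock);
    }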
1086 if (ioa_cfg->hrrq_num == 1) in ipr_get_hrrq_index()
1089 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index); in ipr_get_hrrq_index()
1090 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1; in ipr_get_hrrq_index()
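ipr_get_hrrq_index() spreads I/O across response queues while reserving queue 0 for internal commands: an atomic counter is folded round-robin onto indices 1..hrrq_num-1. The same arithmetic in a plain-C sketch:

    #include <stdatomic.h>

    static atomic_uint rr_counter;

    /* Queue 0 is reserved for internal ops; everything else is spread
     * round-robin over indices 1..nr_queues-1. */
    static unsigned int pick_queue(unsigned int nr_queues)
    {
            if (nr_queues == 1)
                    return 0;
            return (atomic_fetch_add(&rr_counter, 1) % (nr_queues - 1)) + 1;
    }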
1096 * ipr_send_hcam - Send an HCAM to the adapter.
1114 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_send_hcam()
1116 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_send_hcam()
1117 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); in ipr_send_hcam()
1119 ipr_cmd->u.hostrcb = hostrcb; in ipr_send_hcam()
1120 ioarcb = &ipr_cmd->ioarcb; in ipr_send_hcam()
1122 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_send_hcam()
1123 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; in ipr_send_hcam()
1124 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; in ipr_send_hcam()
1125 ioarcb->cmd_pkt.cdb[1] = type; in ipr_send_hcam()
1126 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; in ipr_send_hcam()
1127 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; in ipr_send_hcam()
1129 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, in ipr_send_hcam()
1130 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); in ipr_send_hcam()
1133 ipr_cmd->done = ipr_process_ccn; in ipr_send_hcam()
1135 ipr_cmd->done = ipr_process_error; in ipr_send_hcam()
1141 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_send_hcam()
1146 * ipr_update_ata_class - Update the ata class in the resource entry
1158 res->ata_class = ATA_DEV_ATA; in ipr_update_ata_class()
1162 res->ata_class = ATA_DEV_ATAPI; in ipr_update_ata_class()
1165 res->ata_class = ATA_DEV_UNKNOWN; in ipr_update_ata_class()
1171 * ipr_init_res_entry - Initialize a resource entry struct.
1183 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_init_res_entry()
1186 res->needs_sync_complete = 0; in ipr_init_res_entry()
1187 res->in_erp = 0; in ipr_init_res_entry()
1188 res->add_to_ml = 0; in ipr_init_res_entry()
1189 res->del_from_ml = 0; in ipr_init_res_entry()
1190 res->resetting_device = 0; in ipr_init_res_entry()
1191 res->reset_occurred = 0; in ipr_init_res_entry()
1192 res->sdev = NULL; in ipr_init_res_entry()
1193 res->sata_port = NULL; in ipr_init_res_entry()
1195 if (ioa_cfg->sis64) { in ipr_init_res_entry()
1196 proto = cfgtew->u.cfgte64->proto; in ipr_init_res_entry()
1197 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_init_res_entry()
1198 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_init_res_entry()
1199 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_init_res_entry()
1200 res->type = cfgtew->u.cfgte64->res_type; in ipr_init_res_entry()
1202 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_init_res_entry()
1203 sizeof(res->res_path)); in ipr_init_res_entry()
1205 res->bus = 0; in ipr_init_res_entry()
1206 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_init_res_entry()
1207 sizeof(res->dev_lun.scsi_lun)); in ipr_init_res_entry()
1208 res->lun = scsilun_to_int(&res->dev_lun); in ipr_init_res_entry()
1210 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_init_res_entry()
1211 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { in ipr_init_res_entry()
1212 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { in ipr_init_res_entry()
1214 res->target = gscsi_res->target; in ipr_init_res_entry()
1219 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1220 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1221 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1223 } else if (res->type == IPR_RES_TYPE_IOAFP) { in ipr_init_res_entry()
1224 res->bus = IPR_IOAFP_VIRTUAL_BUS; in ipr_init_res_entry()
1225 res->target = 0; in ipr_init_res_entry()
1226 } else if (res->type == IPR_RES_TYPE_ARRAY) { in ipr_init_res_entry()
1227 res->bus = IPR_ARRAY_VIRTUAL_BUS; in ipr_init_res_entry()
1228 res->target = find_first_zero_bit(ioa_cfg->array_ids, in ipr_init_res_entry()
1229 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1230 set_bit(res->target, ioa_cfg->array_ids); in ipr_init_res_entry()
1231 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { in ipr_init_res_entry()
1232 res->bus = IPR_VSET_VIRTUAL_BUS; in ipr_init_res_entry()
1233 res->target = find_first_zero_bit(ioa_cfg->vset_ids, in ipr_init_res_entry()
1234 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1235 set_bit(res->target, ioa_cfg->vset_ids); in ipr_init_res_entry()
1237 res->target = find_first_zero_bit(ioa_cfg->target_ids, in ipr_init_res_entry()
1238 ioa_cfg->max_devs_supported); in ipr_init_res_entry()
1239 set_bit(res->target, ioa_cfg->target_ids); in ipr_init_res_entry()
1242 proto = cfgtew->u.cfgte->proto; in ipr_init_res_entry()
1243 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_init_res_entry()
1244 res->flags = cfgtew->u.cfgte->flags; in ipr_init_res_entry()
1245 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_init_res_entry()
1246 res->type = IPR_RES_TYPE_IOAFP; in ipr_init_res_entry()
1248 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_init_res_entry()
1250 res->bus = cfgtew->u.cfgte->res_addr.bus; in ipr_init_res_entry()
1251 res->target = cfgtew->u.cfgte->res_addr.target; in ipr_init_res_entry()
1252 res->lun = cfgtew->u.cfgte->res_addr.lun; in ipr_init_res_entry()
1253 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn); in ipr_init_res_entry()
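Target numbers come from per-bus bitmaps: find_first_zero_bit() locates a free ID and set_bit() claims it, with ipr_clear_res_target() below returning IDs via clear_bit(). A minimal sketch of that allocator (size and names invented; the driver serializes this under the host lock):

    #include <linux/bitmap.h>

    #define MAX_IDS 1024
    static DECLARE_BITMAP(target_ids, MAX_IDS);

    /* Hand out the lowest free target ID, or -1 if the map is full.
     * The driver does this under the host lock; callers here must too. */
    static int alloc_target_id(void)
    {
            unsigned int id = find_first_zero_bit(target_ids, MAX_IDS);

            if (id >= MAX_IDS)
                    return -1;
            set_bit(id, target_ids);
            return id;
    }

    static void free_target_id(int id)
    {
            clear_bit(id, target_ids);
    }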
1260 * ipr_is_same_device - Determine if two devices are the same.
1270 if (res->ioa_cfg->sis64) { in ipr_is_same_device()
1271 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, in ipr_is_same_device()
1272 sizeof(cfgtew->u.cfgte64->dev_id)) && in ipr_is_same_device()
1273 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_is_same_device()
1274 sizeof(cfgtew->u.cfgte64->lun))) { in ipr_is_same_device()
1278 if (res->bus == cfgtew->u.cfgte->res_addr.bus && in ipr_is_same_device()
1279 res->target == cfgtew->u.cfgte->res_addr.target && in ipr_is_same_device()
1280 res->lun == cfgtew->u.cfgte->res_addr.lun) in ipr_is_same_device()
1288 * __ipr_format_res_path - Format the resource path for printing.
1302 p += scnprintf(p, buffer + len - p, "%02X", res_path[0]); in __ipr_format_res_path()
1304 p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]); in __ipr_format_res_path()
1310 * ipr_format_res_path - Format the resource path for printing.
1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no); in ipr_format_res_path()
1326 __ipr_format_res_path(res_path, p, len - (p - buffer)); in ipr_format_res_path()
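(The remaining-length expression is corrected above: after p has advanced past buffer, the space left is len - (p - buffer), not len - (buffer - p).) Both formatting helpers rely on scnprintf() returning the number of bytes actually written, so the cursor and the remaining-space expression stay consistent while appending. A self-contained sketch of the accumulation pattern:

    #include <linux/kernel.h>       /* scnprintf() */
    #include <linux/types.h>

    /* Render "AA-BB-CC..." into @buf without overrunning @len:
     * scnprintf() returns what it actually wrote, keeping @p and the
     * remaining-space expression consistent as segments are appended. */
    static char *format_path(const u8 *path, int n, char *buf, int len)
    {
            char *p = buf;
            int i;

            *p = '\0';
            p += scnprintf(p, buf + len - p, "%02X", path[0]);
            for (i = 1; i < n; i++)
                    p += scnprintf(p, buf + len - p, "-%02X", path[i]);
            return buf;
    }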
1331 * ipr_update_res_entry - Update the resource entry.
1345 if (res->ioa_cfg->sis64) { in ipr_update_res_entry()
1346 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags); in ipr_update_res_entry()
1347 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags); in ipr_update_res_entry()
1348 res->type = cfgtew->u.cfgte64->res_type; in ipr_update_res_entry()
1350 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, in ipr_update_res_entry()
1353 res->qmodel = IPR_QUEUEING_MODEL64(res); in ipr_update_res_entry()
1354 proto = cfgtew->u.cfgte64->proto; in ipr_update_res_entry()
1355 res->res_handle = cfgtew->u.cfgte64->res_handle; in ipr_update_res_entry()
1356 res->dev_id = cfgtew->u.cfgte64->dev_id; in ipr_update_res_entry()
1358 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, in ipr_update_res_entry()
1359 sizeof(res->dev_lun.scsi_lun)); in ipr_update_res_entry()
1361 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1362 sizeof(res->res_path))) { in ipr_update_res_entry()
1363 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, in ipr_update_res_entry()
1364 sizeof(res->res_path)); in ipr_update_res_entry()
1368 if (res->sdev && new_path) in ipr_update_res_entry()
1369 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", in ipr_update_res_entry()
1370 ipr_format_res_path(res->ioa_cfg, in ipr_update_res_entry()
1371 res->res_path, buffer, sizeof(buffer))); in ipr_update_res_entry()
1373 res->flags = cfgtew->u.cfgte->flags; in ipr_update_res_entry()
1374 if (res->flags & IPR_IS_IOA_RESOURCE) in ipr_update_res_entry()
1375 res->type = IPR_RES_TYPE_IOAFP; in ipr_update_res_entry()
1377 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; in ipr_update_res_entry()
1379 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, in ipr_update_res_entry()
1382 res->qmodel = IPR_QUEUEING_MODEL(res); in ipr_update_res_entry()
1383 proto = cfgtew->u.cfgte->proto; in ipr_update_res_entry()
1384 res->res_handle = cfgtew->u.cfgte->res_handle; in ipr_update_res_entry()
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; in ipr_clear_res_target()
1403 if (!ioa_cfg->sis64) in ipr_clear_res_target()
1406 if (res->bus == IPR_ARRAY_VIRTUAL_BUS) in ipr_clear_res_target()
1407 clear_bit(res->target, ioa_cfg->array_ids); in ipr_clear_res_target()
1408 else if (res->bus == IPR_VSET_VIRTUAL_BUS) in ipr_clear_res_target()
1409 clear_bit(res->target, ioa_cfg->vset_ids); in ipr_clear_res_target()
1410 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { in ipr_clear_res_target()
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) in ipr_clear_res_target()
1412 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) in ipr_clear_res_target()
1414 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1416 } else if (res->bus == 0) in ipr_clear_res_target()
1417 clear_bit(res->target, ioa_cfg->target_ids); in ipr_clear_res_target()
1421 * ipr_handle_config_change - Handle a config change from the adapter
1437 if (ioa_cfg->sis64) { in ipr_handle_config_change()
1438 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; in ipr_handle_config_change()
1439 cc_res_handle = cfgtew.u.cfgte64->res_handle; in ipr_handle_config_change()
1441 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; in ipr_handle_config_change()
1442 cc_res_handle = cfgtew.u.cfgte->res_handle; in ipr_handle_config_change()
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_handle_config_change()
1446 if (res->res_handle == cc_res_handle) { in ipr_handle_config_change()
1453 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_handle_config_change()
1460 res = list_entry(ioa_cfg->free_res_q.next, in ipr_handle_config_change()
1463 list_del(&res->queue); in ipr_handle_config_change()
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_handle_config_change()
1470 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { in ipr_handle_config_change()
1471 if (res->sdev) { in ipr_handle_config_change()
1472 res->del_from_ml = 1; in ipr_handle_config_change()
1473 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_handle_config_change()
1474 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_handle_config_change()
1479 } else if (!res->sdev || res->del_from_ml) { in ipr_handle_config_change()
1480 res->add_to_ml = 1; in ipr_handle_config_change()
1481 schedule_work(&ioa_cfg->work_q); in ipr_handle_config_change()
1488 * ipr_process_ccn - Op done function for a CCN.
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_ccn()
1500 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_ccn()
1501 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_ccn()
1503 list_del_init(&hostrcb->queue); in ipr_process_ccn()
1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_ccn()
1509 dev_err(&ioa_cfg->pdev->dev, in ipr_process_ccn()
1519 * strip_whitespace - Strip and pad trailing whitespace.
1531 i--; in strip_whitespace()
1533 i--; in strip_whitespace()
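Only two i-- fragments of strip_whitespace() match the search; for context, a hedged reconstruction consistent with those fragments, trimming the trailing space padding of a fixed-width VPD field (name is ours):

    /* Reconstruction sketch only (name is ours): trim the trailing
     * space padding of an @i-byte fixed-width field and terminate it. */
    static char *strip_trailing_spaces(char *buf, int i)
    {
            if (i < 1)
                    return NULL;
            i--;                            /* index of the last byte */
            while (i && buf[i] == ' ')
                    i--;                    /* back up over the padding */
            buf[i + 1] = '\0';
            return buf;
    }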
1538 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1553 memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd_compact()
1556 memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); in ipr_log_vpd_compact()
1559 memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd_compact()
1567 * ipr_log_vpd - Log the passed VPD to the error log.
1578 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); in ipr_log_vpd()
1579 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id, in ipr_log_vpd()
1584 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN); in ipr_log_vpd()
1590 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1601 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd); in ipr_log_ext_vpd_compact()
1603 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd_compact()
1607 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1615 ipr_log_vpd(&vpd->vpd); in ipr_log_ext_vpd()
1616 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]), in ipr_log_ext_vpd()
1617 be32_to_cpu(vpd->wwid[1])); in ipr_log_ext_vpd()
1621 * ipr_log_enhanced_cache_error - Log a cache error.
1633 if (ioa_cfg->sis64) in ipr_log_enhanced_cache_error()
1634 error = &hostrcb->hcam.u.error64.u.type_12_error; in ipr_log_enhanced_cache_error()
1636 error = &hostrcb->hcam.u.error.u.type_12_error; in ipr_log_enhanced_cache_error()
1638 ipr_err("-----Current Configuration-----\n"); in ipr_log_enhanced_cache_error()
1640 ipr_log_ext_vpd(&error->ioa_vpd); in ipr_log_enhanced_cache_error()
1642 ipr_log_ext_vpd(&error->cfc_vpd); in ipr_log_enhanced_cache_error()
1644 ipr_err("-----Expected Configuration-----\n"); in ipr_log_enhanced_cache_error()
1646 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_enhanced_cache_error()
1648 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_enhanced_cache_error()
1651 be32_to_cpu(error->ioa_data[0]), in ipr_log_enhanced_cache_error()
1652 be32_to_cpu(error->ioa_data[1]), in ipr_log_enhanced_cache_error()
1653 be32_to_cpu(error->ioa_data[2])); in ipr_log_enhanced_cache_error()
1657 * ipr_log_cache_error - Log a cache error.
1668 &hostrcb->hcam.u.error.u.type_02_error; in ipr_log_cache_error()
1670 ipr_err("-----Current Configuration-----\n"); in ipr_log_cache_error()
1672 ipr_log_vpd(&error->ioa_vpd); in ipr_log_cache_error()
1674 ipr_log_vpd(&error->cfc_vpd); in ipr_log_cache_error()
1676 ipr_err("-----Expected Configuration-----\n"); in ipr_log_cache_error()
1678 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd); in ipr_log_cache_error()
1680 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd); in ipr_log_cache_error()
1683 be32_to_cpu(error->ioa_data[0]), in ipr_log_cache_error()
1684 be32_to_cpu(error->ioa_data[1]), in ipr_log_cache_error()
1685 be32_to_cpu(error->ioa_data[2])); in ipr_log_cache_error()
1689 * ipr_log_enhanced_config_error - Log a configuration error.
1703 error = &hostrcb->hcam.u.error.u.type_13_error; in ipr_log_enhanced_config_error()
1704 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_enhanced_config_error()
1707 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_enhanced_config_error()
1709 dev_entry = error->dev; in ipr_log_enhanced_config_error()
1714 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_enhanced_config_error()
1715 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_enhanced_config_error()
1717 ipr_err("-----New Device Information-----\n"); in ipr_log_enhanced_config_error()
1718 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_enhanced_config_error()
1721 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1724 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_enhanced_config_error()
1729 * ipr_log_sis64_config_error - Log a configuration error. in ipr_log_sis64_config_error()
1744 error = &hostrcb->hcam.u.error64.u.type_23_error; in ipr_log_sis64_config_error()
1745 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_sis64_config_error()
1748 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_sis64_config_error()
1750 dev_entry = error->dev; in ipr_log_sis64_config_error()
1756 __ipr_format_res_path(dev_entry->res_path, in ipr_log_sis64_config_error()
1758 ipr_log_ext_vpd(&dev_entry->vpd); in ipr_log_sis64_config_error()
1760 ipr_err("-----New Device Information-----\n"); in ipr_log_sis64_config_error()
1761 ipr_log_ext_vpd(&dev_entry->new_vpd); in ipr_log_sis64_config_error()
1764 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_sis64_config_error()
1767 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_sis64_config_error()
1772 * ipr_log_config_error - Log a configuration error.
1786 error = &hostrcb->hcam.u.error.u.type_03_error; in ipr_log_config_error()
1787 errors_logged = be32_to_cpu(error->errors_logged); in ipr_log_config_error()
1790 be32_to_cpu(error->errors_detected), errors_logged); in ipr_log_config_error()
1792 dev_entry = error->dev; in ipr_log_config_error()
1797 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1); in ipr_log_config_error()
1798 ipr_log_vpd(&dev_entry->vpd); in ipr_log_config_error()
1800 ipr_err("-----New Device Information-----\n"); in ipr_log_config_error()
1801 ipr_log_vpd(&dev_entry->new_vpd); in ipr_log_config_error()
1804 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd); in ipr_log_config_error()
1807 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd); in ipr_log_config_error()
1810 be32_to_cpu(dev_entry->ioa_data[0]), in ipr_log_config_error()
1811 be32_to_cpu(dev_entry->ioa_data[1]), in ipr_log_config_error()
1812 be32_to_cpu(dev_entry->ioa_data[2]), in ipr_log_config_error()
1813 be32_to_cpu(dev_entry->ioa_data[3]), in ipr_log_config_error()
1814 be32_to_cpu(dev_entry->ioa_data[4])); in ipr_log_config_error()
1819 * ipr_log_enhanced_array_error - Log an array configuration error.
1832 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_enhanced_array_error()
1834 error = &hostrcb->hcam.u.error.u.type_14_error; in ipr_log_enhanced_array_error()
1839 error->protection_level, in ipr_log_enhanced_array_error()
1840 ioa_cfg->host->host_no, in ipr_log_enhanced_array_error()
1841 error->last_func_vset_res_addr.bus, in ipr_log_enhanced_array_error()
1842 error->last_func_vset_res_addr.target, in ipr_log_enhanced_array_error()
1843 error->last_func_vset_res_addr.lun); in ipr_log_enhanced_array_error()
1847 array_entry = error->array_member; in ipr_log_enhanced_array_error()
1848 num_entries = min_t(u32, be32_to_cpu(error->num_entries), in ipr_log_enhanced_array_error()
1849 ARRAY_SIZE(error->array_member)); in ipr_log_enhanced_array_error()
1852 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_enhanced_array_error()
1855 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_enhanced_array_error()
1860 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_enhanced_array_error()
1861 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_enhanced_array_error()
1862 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_enhanced_array_error()
1870 * ipr_log_array_error - Log an array configuration error.
1883 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_array_error()
1885 error = &hostrcb->hcam.u.error.u.type_04_error; in ipr_log_array_error()
1890 error->protection_level, in ipr_log_array_error()
1891 ioa_cfg->host->host_no, in ipr_log_array_error()
1892 error->last_func_vset_res_addr.bus, in ipr_log_array_error()
1893 error->last_func_vset_res_addr.target, in ipr_log_array_error()
1894 error->last_func_vset_res_addr.lun); in ipr_log_array_error()
1898 array_entry = error->array_member; in ipr_log_array_error()
1901 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_array_error()
1904 if (be32_to_cpu(error->exposed_mode_adn) == i) in ipr_log_array_error()
1909 ipr_log_vpd(&array_entry->vpd); in ipr_log_array_error()
1911 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location"); in ipr_log_array_error()
1912 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr, in ipr_log_array_error()
1918 array_entry = error->array_member2; in ipr_log_array_error()
1925 * ipr_log_hex_data - Log additional hex IOA error data.
1940 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_log_hex_data()
1953 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1965 if (ioa_cfg->sis64) in ipr_log_enhanced_dual_ioa_error()
1966 error = &hostrcb->hcam.u.error64.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1968 error = &hostrcb->hcam.u.error.u.type_17_error; in ipr_log_enhanced_dual_ioa_error()
1970 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_enhanced_dual_ioa_error()
1971 strim(error->failure_reason); in ipr_log_enhanced_dual_ioa_error()
1973 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_enhanced_dual_ioa_error()
1974 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_enhanced_dual_ioa_error()
1975 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_enhanced_dual_ioa_error()
1976 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_enhanced_dual_ioa_error()
1977 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_enhanced_dual_ioa_error()
1983 * ipr_log_dual_ioa_error - Log a dual adapter error.
1995 error = &hostrcb->hcam.u.error.u.type_07_error; in ipr_log_dual_ioa_error()
1996 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_dual_ioa_error()
1997 strim(error->failure_reason); in ipr_log_dual_ioa_error()
1999 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason, in ipr_log_dual_ioa_error()
2000 be32_to_cpu(hostrcb->hcam.u.error.prc)); in ipr_log_dual_ioa_error()
2001 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd); in ipr_log_dual_ioa_error()
2002 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_dual_ioa_error()
2003 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_dual_ioa_error()
2028 * ipr_log_fabric_path - Log a fabric path error
2039 u8 path_state = fabric->path_state; in ipr_log_fabric_path()
2051 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) { in ipr_log_fabric_path()
2054 fabric->ioa_port); in ipr_log_fabric_path()
2055 } else if (fabric->cascaded_expander == 0xff) { in ipr_log_fabric_path()
2058 fabric->ioa_port, fabric->phy); in ipr_log_fabric_path()
2059 } else if (fabric->phy == 0xff) { in ipr_log_fabric_path()
2062 fabric->ioa_port, fabric->cascaded_expander); in ipr_log_fabric_path()
2066 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2073 fabric->ioa_port, fabric->cascaded_expander, fabric->phy); in ipr_log_fabric_path()
2077 * ipr_log64_fabric_path - Log a fabric path error
2088 u8 path_state = fabric->path_state; in ipr_log64_fabric_path()
2103 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_fabric_path()
2104 fabric->res_path, in ipr_log64_fabric_path()
2111 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path, in ipr_log64_fabric_path()
2157 * ipr_log_path_elem - Log a fabric path element.
2168 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log_path_elem()
2169 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log_path_elem()
2185 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2186 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2188 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) { in ipr_log_path_elem()
2191 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2192 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2193 } else if (cfg->cascaded_expander == 0xff) { in ipr_log_path_elem()
2196 path_type_desc[i].desc, cfg->phy, in ipr_log_path_elem()
2197 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2198 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2199 } else if (cfg->phy == 0xff) { in ipr_log_path_elem()
2202 path_type_desc[i].desc, cfg->cascaded_expander, in ipr_log_path_elem()
2203 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2204 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2208 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2209 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2210 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2218 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy, in ipr_log_path_elem()
2219 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log_path_elem()
2220 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log_path_elem()
2224 * ipr_log64_path_elem - Log a fabric path element.
2235 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; in ipr_log64_path_elem()
2236 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; in ipr_log64_path_elem()
2237 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; in ipr_log64_path_elem()
2253 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2254 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2255 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2256 be32_to_cpu(cfg->wwid[0]), in ipr_log64_path_elem()
2257 be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2262 "WWN=%08X%08X\n", cfg->type_status, in ipr_log64_path_elem()
2263 ipr_format_res_path(hostrcb->ioa_cfg, in ipr_log64_path_elem()
2264 cfg->res_path, buffer, sizeof(buffer)), in ipr_log64_path_elem()
2265 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], in ipr_log64_path_elem()
2266 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); in ipr_log64_path_elem()
2270 * ipr_log_fabric_error - Log a fabric error.
2285 error = &hostrcb->hcam.u.error.u.type_20_error; in ipr_log_fabric_error()
2286 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_fabric_error()
2287 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_fabric_error()
2289 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_fabric_error()
2293 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_fabric_error()
2298 add_len -= be16_to_cpu(fabric->length); in ipr_log_fabric_error()
2300 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_fabric_error()
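The fabric log walks back-to-back variable-length descriptors: each record carries its own length, so the cursor advances by byte arithmetic while add_len tracks the remaining HCAM payload. A generic sketch of that walk with a made-up record type:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct record { __be16 length; u8 data[]; };

    /* Walk packed variable-length records within @len bytes: each
     * record's own length field says where the next one starts. */
    static void walk_records(struct record *r, int len,
                             void (*show)(struct record *))
    {
            while (len >= (int)sizeof(*r)) {
                    int rlen = be16_to_cpu(r->length);

                    if (rlen < (int)sizeof(*r) || rlen > len)
                            break;          /* malformed: stop walking */
                    show(r);
                    len -= rlen;
                    r = (struct record *)((unsigned long)r + rlen);
            }
    }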
2307 * ipr_log_sis64_array_error - Log a sis64 array error.
2321 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; in ipr_log_sis64_array_error()
2323 error = &hostrcb->hcam.u.error64.u.type_24_error; in ipr_log_sis64_array_error()
2328 error->protection_level, in ipr_log_sis64_array_error()
2329 ipr_format_res_path(ioa_cfg, error->last_res_path, in ipr_log_sis64_array_error()
2334 array_entry = error->array_member; in ipr_log_sis64_array_error()
2335 num_entries = min_t(u32, error->num_entries, in ipr_log_sis64_array_error()
2336 ARRAY_SIZE(error->array_member)); in ipr_log_sis64_array_error()
2340 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) in ipr_log_sis64_array_error()
2343 if (error->exposed_mode_adn == i) in ipr_log_sis64_array_error()
2349 ipr_log_ext_vpd(&array_entry->vpd); in ipr_log_sis64_array_error()
2351 ipr_format_res_path(ioa_cfg, array_entry->res_path, in ipr_log_sis64_array_error()
2355 array_entry->expected_res_path, in ipr_log_sis64_array_error()
2363 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378 error = &hostrcb->hcam.u.error64.u.type_30_error; in ipr_log_sis64_fabric_error()
2380 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_fabric_error()
2381 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); in ipr_log_sis64_fabric_error()
2383 add_len = be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_fabric_error()
2387 for (i = 0, fabric = error->desc; i < error->num_entries; i++) { in ipr_log_sis64_fabric_error()
2392 add_len -= be16_to_cpu(fabric->length); in ipr_log_sis64_fabric_error()
2394 ((unsigned long)fabric + be16_to_cpu(fabric->length)); in ipr_log_sis64_fabric_error()
2401 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2413 error = &hostrcb->hcam.u.error64.u.type_41_error; in ipr_log_sis64_service_required_error()
2415 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; in ipr_log_sis64_service_required_error()
2416 ipr_err("Primary Failure Reason: %s\n", error->failure_reason); in ipr_log_sis64_service_required_error()
2417 ipr_log_hex_data(ioa_cfg, error->data, in ipr_log_sis64_service_required_error()
2418 be32_to_cpu(hostrcb->hcam.length) - in ipr_log_sis64_service_required_error()
2423 * ipr_log_generic_error - Log an adapter error.
2433 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data, in ipr_log_generic_error()
2434 be32_to_cpu(hostrcb->hcam.length)); in ipr_log_generic_error()
2438 * ipr_log_sis64_device_error - Log a device error. in ipr_log_sis64_device_error()
2451 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_log_sis64_device_error()
2453 ipr_err("-----Failing Device Information-----\n"); in ipr_log_sis64_device_error()
2455 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), in ipr_log_sis64_device_error()
2456 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); in ipr_log_sis64_device_error()
2458 __ipr_format_res_path(error->res_path, in ipr_log_sis64_device_error()
2460 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2461 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; in ipr_log_sis64_device_error()
2462 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); in ipr_log_sis64_device_error()
2463 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); in ipr_log_sis64_device_error()
2465 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); in ipr_log_sis64_device_error()
2467 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); in ipr_log_sis64_device_error()
2470 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); in ipr_log_sis64_device_error()
2474 * ipr_get_error - Find the specified IOASC in the ipr_error_table. in ipr_get_error()
2496 * ipr_handle_log_data - Log an adapter error.
2512 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) in ipr_handle_log_data()
2515 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) in ipr_handle_log_data()
2516 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); in ipr_handle_log_data()
2518 if (ioa_cfg->sis64) in ipr_handle_log_data()
2519 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_handle_log_data()
2521 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_handle_log_data()
2523 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || in ipr_handle_log_data()
2526 scsi_report_bus_reset(ioa_cfg->host, in ipr_handle_log_data()
2527 hostrcb->hcam.u.error.fd_res_addr.bus); in ipr_handle_log_data()
2536 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) { in ipr_handle_log_data()
2537 error = &hostrcb->hcam.u.error64.u.type_21_error; in ipr_handle_log_data()
2539 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST && in ipr_handle_log_data()
2540 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL) in ipr_handle_log_data()
2547 ioa_cfg->errors_logged++; in ipr_handle_log_data()
2549 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam) in ipr_handle_log_data()
2551 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw)) in ipr_handle_log_data()
2552 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw)); in ipr_handle_log_data()
2554 switch (hostrcb->hcam.overlay_id) { in ipr_handle_log_data()
2612 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q, in ipr_get_free_hostrcb()
2616 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers."); in ipr_get_free_hostrcb()
2617 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q, in ipr_get_free_hostrcb()
2621 list_del_init(&hostrcb->queue); in ipr_get_free_hostrcb()
2626 * ipr_process_error - Op done function for an adapter error log.
2638 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_process_error()
2639 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; in ipr_process_error()
2640 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_error()
2643 if (ioa_cfg->sis64) in ipr_process_error()
2644 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); in ipr_process_error()
2646 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_process_error()
2648 list_del_init(&hostrcb->queue); in ipr_process_error()
2649 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_process_error()
2657 dev_err(&ioa_cfg->pdev->dev, in ipr_process_error()
2661 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q); in ipr_process_error()
2662 schedule_work(&ioa_cfg->work_q); in ipr_process_error()
2669 * ipr_timeout - An internally generated op has timed out.
2682 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_timeout()
2685 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2687 ioa_cfg->errors_logged++; in ipr_timeout()
2688 dev_err(&ioa_cfg->pdev->dev, in ipr_timeout()
2691 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_timeout()
2692 ioa_cfg->sdt_state = GET_DUMP; in ipr_timeout()
2694 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) in ipr_timeout()
2697 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_timeout()
2702 * ipr_oper_timeout - Adapter timed out transitioning to operational
2715 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_oper_timeout()
2718 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2720 ioa_cfg->errors_logged++; in ipr_oper_timeout()
2721 dev_err(&ioa_cfg->pdev->dev, in ipr_oper_timeout()
2724 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_oper_timeout()
2725 ioa_cfg->sdt_state = GET_DUMP; in ipr_oper_timeout()
2727 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) { in ipr_oper_timeout()
2729 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_oper_timeout()
2733 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_oper_timeout()
2738 * ipr_find_ses_entry - Find matching SES in SES table
2753 if (ste->compare_product_id_byte[j] == 'X') { in ipr_find_ses_entry()
2754 vpids = &res->std_inq_data.vpids; in ipr_find_ses_entry()
2755 if (vpids->product_id[j] == ste->product_id[j]) in ipr_find_ses_entry()
2771 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2778 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_get_max_scsi_speed()
2790 if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) in ipr_get_max_scsi_speed()
2793 if (bus != res->bus) in ipr_get_max_scsi_speed()
2799 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); in ipr_get_max_scsi_speed()
2806 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2808 * @max_delay: max delay in micro-seconds to wait
2822 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_wait_iodbg_ack()
2835 return -EIO; in ipr_wait_iodbg_ack()
2839 * ipr_get_sis64_dump_data_section - Dump IOA memory
2855 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); in ipr_get_sis64_dump_data_section()
2856 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); in ipr_get_sis64_dump_data_section()
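SIS64 dump data is fetched through a two-register indirect window: write the target address to dump_addr_reg, read the word from dump_data_reg, advance four bytes, repeat. A sketch of the idiom with invented register parameters:

    #include <linux/io.h>
    #include <asm/byteorder.h>

    /* Indirect window read, offsets invented: write the address
     * register, read the data register, one 32-bit word at a time;
     * the dump format above stores the words big-endian. */
    static void window_read(void __iomem *addr_reg, void __iomem *data_reg,
                            u32 start, __be32 *dest, int words)
    {
            int i;

            for (i = 0; i < words; i++, dest++) {
                    writel(start + i * 4, addr_reg);
                    *dest = cpu_to_be32(readl(data_reg));
            }
    }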
2864 * ipr_get_ldump_data_section - Dump IOA memory
2871 * 0 on success / -EIO on failure
2880 if (ioa_cfg->sis64) in ipr_get_ldump_data_section()
2886 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2891 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2893 return -EIO; in ipr_get_ldump_data_section()
2896 /* Signal LDUMP interlocked - clear IO debug ack */ in ipr_get_ldump_data_section()
2898 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2901 writel(start_addr, ioa_cfg->ioa_mailbox); in ipr_get_ldump_data_section()
2903 /* Signal address valid - clear IOA Reset alert */ in ipr_get_ldump_data_section()
2905 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2911 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ldump_data_section()
2913 return -EIO; in ipr_get_ldump_data_section()
2917 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); in ipr_get_ldump_data_section()
2921 if (i < (length_in_words - 1)) { in ipr_get_ldump_data_section()
2922 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2924 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2930 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2933 ioa_cfg->regs.clr_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2935 /* Signal dump data received - Clear IO debug Ack */ in ipr_get_ldump_data_section()
2937 ioa_cfg->regs.clr_interrupt_reg); in ipr_get_ldump_data_section()
2939 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ in ipr_get_ldump_data_section()
2942 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_get_ldump_data_section()
2956 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2973 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; in ipr_sdt_copy()
2975 if (ioa_cfg->sis64) in ipr_sdt_copy()
2981 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) { in ipr_sdt_copy()
2982 if (ioa_dump->page_offset >= PAGE_SIZE || in ipr_sdt_copy()
2983 ioa_dump->page_offset == 0) { in ipr_sdt_copy()
2991 ioa_dump->page_offset = 0; in ipr_sdt_copy()
2992 ioa_dump->ioa_data[ioa_dump->next_page_index] = page; in ipr_sdt_copy()
2993 ioa_dump->next_page_index++; in ipr_sdt_copy()
2995 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; in ipr_sdt_copy()
2997 rem_len = length - bytes_copied; in ipr_sdt_copy()
2998 rem_page_len = PAGE_SIZE - ioa_dump->page_offset; in ipr_sdt_copy()
3001 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3002 if (ioa_cfg->sdt_state == ABORT_DUMP) { in ipr_sdt_copy()
3003 rc = -EIO; in ipr_sdt_copy()
3007 &page[ioa_dump->page_offset / 4], in ipr_sdt_copy()
3010 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sdt_copy()
3013 ioa_dump->page_offset += cur_len; in ipr_sdt_copy()
3026 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3034 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_init_dump_entry_hdr()
3035 hdr->num_elems = 1; in ipr_init_dump_entry_hdr()
3036 hdr->offset = sizeof(*hdr); in ipr_init_dump_entry_hdr()
3037 hdr->status = IPR_DUMP_STATUS_SUCCESS; in ipr_init_dump_entry_hdr()
3041 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3051 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_dump_ioa_type_data()
3053 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); in ipr_dump_ioa_type_data()
3054 driver_dump->ioa_type_entry.hdr.len = in ipr_dump_ioa_type_data()
3055 sizeof(struct ipr_dump_ioa_type_entry) - in ipr_dump_ioa_type_data()
3057 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_ioa_type_data()
3058 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; in ipr_dump_ioa_type_data()
3059 driver_dump->ioa_type_entry.type = ioa_cfg->type; in ipr_dump_ioa_type_data()
3060 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | in ipr_dump_ioa_type_data()
3061 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | in ipr_dump_ioa_type_data()
3062 ucode_vpd->minor_release[1]; in ipr_dump_ioa_type_data()
3063 driver_dump->hdr.num_entries++; in ipr_dump_ioa_type_data()
3067 * ipr_dump_version_data - Fill in the driver version in the dump.
3077 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); in ipr_dump_version_data()
3078 driver_dump->version_entry.hdr.len = in ipr_dump_version_data()
3079 sizeof(struct ipr_dump_version_entry) - in ipr_dump_version_data()
3081 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_version_data()
3082 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; in ipr_dump_version_data()
3083 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); in ipr_dump_version_data()
3084 driver_dump->hdr.num_entries++; in ipr_dump_version_data()
3088 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3098 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); in ipr_dump_trace_data()
3099 driver_dump->trace_entry.hdr.len = in ipr_dump_trace_data()
3100 sizeof(struct ipr_dump_trace_entry) - in ipr_dump_trace_data()
3102 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_dump_trace_data()
3103 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; in ipr_dump_trace_data()
3104 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); in ipr_dump_trace_data()
3105 driver_dump->hdr.num_entries++; in ipr_dump_trace_data()
3109 * ipr_dump_location_data - Fill in the IOA location in the dump.
3119 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); in ipr_dump_location_data()
3120 driver_dump->location_entry.hdr.len = in ipr_dump_location_data()
3121 sizeof(struct ipr_dump_location_entry) - in ipr_dump_location_data()
3123 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; in ipr_dump_location_data()
3124 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; in ipr_dump_location_data()
3125 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev)); in ipr_dump_location_data()
3126 driver_dump->hdr.num_entries++; in ipr_dump_location_data()
3130 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3141 struct ipr_driver_dump *driver_dump = &dump->driver_dump; in ipr_get_ioa_dump()
3142 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; in ipr_get_ioa_dump()
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3153 if (ioa_cfg->sdt_state != READ_DUMP) { in ipr_get_ioa_dump()
3154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3158 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3161 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3164 start_addr = readl(ioa_cfg->ioa_mailbox); in ipr_get_ioa_dump()
3166 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { in ipr_get_ioa_dump()
3167 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3173 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); in ipr_get_ioa_dump()
3175 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; in ipr_get_ioa_dump()
3178 driver_dump->hdr.len = sizeof(struct ipr_driver_dump); in ipr_get_ioa_dump()
3179 driver_dump->hdr.num_entries = 1; in ipr_get_ioa_dump()
3180 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); in ipr_get_ioa_dump()
3181 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; in ipr_get_ioa_dump()
3182 driver_dump->hdr.os = IPR_DUMP_OS_LINUX; in ipr_get_ioa_dump()
3183 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; in ipr_get_ioa_dump()
3191 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); in ipr_get_ioa_dump()
3194 ipr_init_dump_entry_hdr(&ioa_dump->hdr); in ipr_get_ioa_dump()
3195 ioa_dump->hdr.len = 0; in ipr_get_ioa_dump()
3196 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; in ipr_get_ioa_dump()
3197 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; in ipr_get_ioa_dump()
3203 sdt = &ioa_dump->sdt; in ipr_get_ioa_dump()
3205 if (ioa_cfg->sis64) { in ipr_get_ioa_dump()
3218 /* Smart Dump table is ready to use and the first entry is valid */ in ipr_get_ioa_dump()
3219 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && in ipr_get_ioa_dump()
3220 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { in ipr_get_ioa_dump()
3221 dev_err(&ioa_cfg->pdev->dev, in ipr_get_ioa_dump()
3223 rc, be32_to_cpu(sdt->hdr.state)); in ipr_get_ioa_dump()
3224 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; in ipr_get_ioa_dump()
3225 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3230 num_entries = be32_to_cpu(sdt->hdr.num_entries_used); in ipr_get_ioa_dump()
3236 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header); in ipr_get_ioa_dump()
3237 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3238 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3240 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry); in ipr_get_ioa_dump()
3242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_get_ioa_dump()
3245 if (ioa_dump->hdr.len > max_dump_size) { in ipr_get_ioa_dump()
3246 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3250 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { in ipr_get_ioa_dump()
3251 sdt_word = be32_to_cpu(sdt->entry[i].start_token); in ipr_get_ioa_dump()
3252 if (ioa_cfg->sis64) in ipr_get_ioa_dump()
3253 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3256 end_off = be32_to_cpu(sdt->entry[i].end_token); in ipr_get_ioa_dump()
3259 bytes_to_copy = end_off - start_off; in ipr_get_ioa_dump()
3265 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; in ipr_get_ioa_dump()
3273 ioa_dump->hdr.len += bytes_copied; in ipr_get_ioa_dump()
3276 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; in ipr_get_ioa_dump()
3283 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); in ipr_get_ioa_dump()
3286 driver_dump->hdr.len += ioa_dump->hdr.len; in ipr_get_ioa_dump()
3288 ioa_cfg->sdt_state = DUMP_OBTAINED; in ipr_get_ioa_dump()
3297 * ipr_release_dump - Free adapter dump memory
3306 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; in ipr_release_dump()
3311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3312 ioa_cfg->dump = NULL; in ipr_release_dump()
3313 ioa_cfg->sdt_state = INACTIVE; in ipr_release_dump()
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_release_dump()
3316 for (i = 0; i < dump->ioa_dump.next_page_index; i++) in ipr_release_dump()
3317 free_page((unsigned long) dump->ioa_dump.ioa_data[i]); in ipr_release_dump()
3319 vfree(dump->ioa_dump.ioa_data); in ipr_release_dump()
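
ipr_release_dump() only ever runs as the kref release callback, so the page frees execute exactly once, after the last kref_put(). A simplified single-threaded model of that ownership scheme (real kref uses atomic refcounts; the demo_* names are invented):

#include <stdio.h>
#include <stdlib.h>

struct demo_dump {
    int refcount;
    void *pages[4];
};

static void demo_release(struct demo_dump *d)
{
    for (int i = 0; i < 4; i++)
        free(d->pages[i]);     /* per-page data, as in ipr_release_dump */
    free(d);
    puts("dump released");
}

static void demo_put(struct demo_dump *d)
{
    if (--d->refcount == 0)    /* last reference runs the release */
        demo_release(d);
}

int main(void)
{
    struct demo_dump *d = calloc(1, sizeof(*d));

    d->refcount = 1;           /* kref_init() by the owner */
    d->refcount++;             /* kref_get() by a reader */
    demo_put(d);               /* reader done: no release yet */
    demo_put(d);               /* owner done: release runs once */
    return 0;
}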
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3340 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { in ipr_add_remove_thread()
3341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3345 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3346 if (res->del_from_ml && res->sdev) { in ipr_add_remove_thread()
3348 sdev = res->sdev; in ipr_add_remove_thread()
3350 if (!res->add_to_ml) in ipr_add_remove_thread()
3351 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_add_remove_thread()
3353 res->del_from_ml = 0; in ipr_add_remove_thread()
3354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3357 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3364 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_add_remove_thread()
3365 if (res->add_to_ml) { in ipr_add_remove_thread()
3366 bus = res->bus; in ipr_add_remove_thread()
3367 target = res->target; in ipr_add_remove_thread()
3368 lun = res->lun; in ipr_add_remove_thread()
3369 res->add_to_ml = 0; in ipr_add_remove_thread()
3370 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3371 scsi_add_device(ioa_cfg->host, bus, target, lun); in ipr_add_remove_thread()
3372 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3377 ioa_cfg->scan_done = 1; in ipr_add_remove_thread()
3378 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_add_remove_thread()
3379 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); in ipr_add_remove_thread()
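
Both loops above use the same discipline: snapshot what they need and clear the flag while holding the host lock, drop the lock across the blocking scsi_add_device()/scsi_remove_device() call, then retake it and restart the walk, since the list may have changed in between. A pthread sketch of that restart pattern (the device array and mid-layer call are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[3] = { 1, 1, 0 };     /* stand-in add_to_ml flags */

static void blocking_midlayer_call(int idx)
{
    printf("adding device %d (lock not held)\n", idx);
}

int main(void)
{
    int i;

restart:
    pthread_mutex_lock(&lock);
    for (i = 0; i < 3; i++) {
        if (pending[i]) {
            pending[i] = 0;              /* snapshot + clear under lock */
            pthread_mutex_unlock(&lock);
            blocking_midlayer_call(i);   /* may sleep; lock dropped */
            goto restart;                /* list may have changed */
        }
    }
    pthread_mutex_unlock(&lock);
    return 0;
}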
3384 * ipr_worker_thread - Worker thread
3388 * of adding and removing devices from the mid-layer as configuration
3402 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3404 if (ioa_cfg->sdt_state == READ_DUMP) { in ipr_worker_thread()
3405 dump = ioa_cfg->dump; in ipr_worker_thread()
3407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3410 kref_get(&dump->kref); in ipr_worker_thread()
3411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3413 kref_put(&dump->kref, ipr_release_dump); in ipr_worker_thread()
3415 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3416 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) in ipr_worker_thread()
3418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3422 if (ioa_cfg->scsi_unblock) { in ipr_worker_thread()
3423 ioa_cfg->scsi_unblock = 0; in ipr_worker_thread()
3424 ioa_cfg->scsi_blocked = 0; in ipr_worker_thread()
3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3426 scsi_unblock_requests(ioa_cfg->host); in ipr_worker_thread()
3427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3428 if (ioa_cfg->scsi_blocked) in ipr_worker_thread()
3429 scsi_block_requests(ioa_cfg->host); in ipr_worker_thread()
3432 if (!ioa_cfg->scan_enabled) { in ipr_worker_thread()
3433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3437 schedule_work(&ioa_cfg->scsi_add_work_q); in ipr_worker_thread()
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_worker_thread()
3445 * ipr_read_trace - Dump the adapter trace
3462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_trace()
3466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3467 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace, in ipr_read_trace()
3469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_trace()
3485 * ipr_show_fw_version - Show the firmware version
3497 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_version()
3498 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_show_fw_version()
3502 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3504 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_show_fw_version()
3505 ucode_vpd->minor_release[0], in ipr_show_fw_version()
3506 ucode_vpd->minor_release[1]); in ipr_show_fw_version()
3507 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_version()
3520 * ipr_show_log_level - Show the adapter's error logging level
3532 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_log_level()
3536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3537 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); in ipr_show_log_level()
3538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_log_level()
3543 * ipr_store_log_level - Change the adapter's error logging level
3557 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_log_level()
3560 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3561 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); in ipr_store_log_level()
3562 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_log_level()
3576 * ipr_store_diagnostics - IOA Diagnostics interface
3593 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_diagnostics()
3598 return -EACCES; in ipr_store_diagnostics()
3600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3601 while (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3602 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3603 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3607 ioa_cfg->errors_logged = 0; in ipr_store_diagnostics()
3610 if (ioa_cfg->in_reset_reload) { in ipr_store_diagnostics()
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_diagnostics()
3617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3618 return -EIO; in ipr_store_diagnostics()
3621 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
3622 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) in ipr_store_diagnostics()
3623 rc = -EIO; in ipr_store_diagnostics()
3624 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_diagnostics()
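
This store, like most of the sysfs handlers that follow, serializes against adapter resets with one handshake: while in_reset_reload is set, drop the host lock and sleep on reset_wait_q until the reset path clears the flag and wakes the waiters. A condition-variable model of that wait loop (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_wait_q = PTHREAD_COND_INITIALIZER;
static int in_reset_reload = 1;

static void *reset_job(void *arg)
{
    (void)arg;
    sleep(1);                        /* the adapter reset runs... */
    pthread_mutex_lock(&lock);
    in_reset_reload = 0;             /* ...and completes */
    pthread_cond_broadcast(&reset_wait_q);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, reset_job, NULL);
    pthread_mutex_lock(&lock);
    while (in_reset_reload)          /* wait_event(reset_wait_q, ...) */
        pthread_cond_wait(&reset_wait_q, &lock);
    pthread_mutex_unlock(&lock);
    puts("reset complete, store may proceed");
    pthread_join(t, NULL);
    return 0;
}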
3638 * ipr_show_adapter_state - Show the adapter's state
3650 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_adapter_state()
3654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3655 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_show_adapter_state()
3659 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_state()
3664 * ipr_store_adapter_state - Change adapter state
3680 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_adapter_state()
3685 return -EACCES; in ipr_store_adapter_state()
3687 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3688 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && in ipr_store_adapter_state()
3690 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_store_adapter_state()
3691 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3692 ioa_cfg->hrrq[i].ioa_is_dead = 0; in ipr_store_adapter_state()
3693 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_store_adapter_state()
3696 ioa_cfg->reset_retries = 0; in ipr_store_adapter_state()
3697 ioa_cfg->in_ioa_bringdown = 0; in ipr_store_adapter_state()
3700 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_adapter_state()
3701 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_adapter_state()
3716 * ipr_store_reset_adapter - Reset the adapter
3732 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_reset_adapter()
3737 return -EACCES; in ipr_store_reset_adapter()
3739 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3740 if (!ioa_cfg->in_reset_reload) in ipr_store_reset_adapter()
3742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_reset_adapter()
3743 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_store_reset_adapter()
3758 * ipr_show_iopoll_weight - Show ipr polling mode
3770 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_iopoll_weight()
3774 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3775 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight); in ipr_show_iopoll_weight()
3776 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_show_iopoll_weight()
3782 * ipr_store_iopoll_weight - Change the adapter's polling mode
3796 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_iopoll_weight()
3801 if (!ioa_cfg->sis64) { in ipr_store_iopoll_weight()
3802 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n"); in ipr_store_iopoll_weight()
3803 return -EINVAL; in ipr_store_iopoll_weight()
3806 return -EINVAL; in ipr_store_iopoll_weight()
3809 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n"); in ipr_store_iopoll_weight()
3810 return -EINVAL; in ipr_store_iopoll_weight()
3813 if (user_iopoll_weight == ioa_cfg->iopoll_weight) { in ipr_store_iopoll_weight()
3814 dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight is unchanged\n"); in ipr_store_iopoll_weight()
3818 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3819 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_store_iopoll_weight()
3820 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_store_iopoll_weight()
3823 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3824 ioa_cfg->iopoll_weight = user_iopoll_weight; in ipr_store_iopoll_weight()
3825 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_store_iopoll_weight()
3826 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_store_iopoll_weight()
3827 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_store_iopoll_weight()
3828 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_store_iopoll_weight()
3831 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_store_iopoll_weight()
3846 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3850 * list to use for microcode download
3861 sg_size = buf_len / (IPR_MAX_SGLIST - 1); in ipr_alloc_ucode_buffer()
3872 sglist->order = order; in ipr_alloc_ucode_buffer()
3873 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL, in ipr_alloc_ucode_buffer()
3874 &sglist->num_sg); in ipr_alloc_ucode_buffer()
3875 if (!sglist->scatterlist) { in ipr_alloc_ucode_buffer()
3884 * ipr_free_ucode_buffer - Frees a microcode download buffer
3895 sgl_free_order(sglist->scatterlist, sglist->order); in ipr_free_ucode_buffer()
3900 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3919 bsize_elem = PAGE_SIZE * (1 << sglist->order); in ipr_copy_ucode_buffer()
3921 sg = sglist->scatterlist; in ipr_copy_ucode_buffer()
3931 sg->length = bsize_elem; in ipr_copy_ucode_buffer()
3946 sg->length = len % bsize_elem; in ipr_copy_ucode_buffer()
3949 sglist->buffer_len = len; in ipr_copy_ucode_buffer()
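
The copy loop above moves the image in bsize_elem-sized strides, where bsize_elem = PAGE_SIZE * (1 << order), and the final scatterlist element carries only the len % bsize_elem remainder. A quick arithmetic check of that split, with made-up page size, order, and image length:

#include <stdio.h>

int main(void)
{
    const unsigned long page_size = 4096;
    const int order = 2;                          /* 4 pages per element */
    const unsigned long bsize_elem = page_size * (1UL << order);
    unsigned long len = 100000;                   /* firmware image bytes */
    unsigned long full = len / bsize_elem;
    unsigned long tail = len % bsize_elem;

    printf("%lu full elements of %lu bytes", full, bsize_elem);
    if (tail)
        printf(" + a final element of %lu bytes", tail);
    printf("\n");
    return 0;
}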
3954 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3964 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl64()
3965 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ucode_ioadl64()
3966 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl64()
3970 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl64()
3971 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl64()
3972 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl64()
3974 ioarcb->ioadl_len = in ipr_build_ucode_ioadl64()
3975 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl64()
3976 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl64()
3982 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ucode_ioadl64()
3986 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3996 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ucode_ioadl()
3997 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ucode_ioadl()
3998 struct scatterlist *scatterlist = sglist->scatterlist; in ipr_build_ucode_ioadl()
4002 ipr_cmd->dma_use_sg = sglist->num_dma_sg; in ipr_build_ucode_ioadl()
4003 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ucode_ioadl()
4004 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); in ipr_build_ucode_ioadl()
4006 ioarcb->ioadl_len = in ipr_build_ucode_ioadl()
4007 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ucode_ioadl()
4009 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ucode_ioadl()
4016 ioadl[i-1].flags_and_data_len |= in ipr_build_ucode_ioadl()
4017 cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ucode_ioadl()
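
Both builders emit one descriptor per mapped segment and tag only the final descriptor with IPR_IOADL_FLAGS_LAST so the adapter knows where the list ends. A host-side model with fabricated segment addresses and lengths (DEMO_FLAG_LAST stands in for the real flag):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FLAG_LAST 0x80000000u

struct demo_ioadl {
    uint32_t flags_and_len;
    uint64_t addr;
};

int main(void)
{
    uint64_t seg_addr[3] = { 0x1000, 0x9000, 0x20000 };
    uint32_t seg_len[3] = { 4096, 8192, 512 };
    struct demo_ioadl ioadl[3];
    int i, nseg = 3;

    for (i = 0; i < nseg; i++) {
        ioadl[i].flags_and_len = seg_len[i];
        ioadl[i].addr = seg_addr[i];
    }
    ioadl[nseg - 1].flags_and_len |= DEMO_FLAG_LAST;  /* the ioadl[i-1] step */

    for (i = 0; i < nseg; i++)
        printf("desc %d: addr=0x%llx len=%u last=%d\n", i,
               (unsigned long long)ioadl[i].addr,
               (unsigned)(ioadl[i].flags_and_len & ~DEMO_FLAG_LAST),
               !!(ioadl[i].flags_and_len & DEMO_FLAG_LAST));
    return 0;
}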
4021 * ipr_update_ioa_ucode - Update IOA's microcode
4028 * 0 on success / -EIO on failure
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4036 while (ioa_cfg->in_reset_reload) { in ipr_update_ioa_ucode()
4037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4042 if (ioa_cfg->ucode_sglist) { in ipr_update_ioa_ucode()
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4044 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4046 return -EIO; in ipr_update_ioa_ucode()
4049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4050 sglist->scatterlist, sglist->num_sg, in ipr_update_ioa_ucode()
4053 if (!sglist->num_dma_sg) { in ipr_update_ioa_ucode()
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4055 dev_err(&ioa_cfg->pdev->dev, in ipr_update_ioa_ucode()
4057 return -EIO; in ipr_update_ioa_ucode()
4060 ioa_cfg->ucode_sglist = sglist; in ipr_update_ioa_ucode()
4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_update_ioa_ucode()
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4066 ioa_cfg->ucode_sglist = NULL; in ipr_update_ioa_ucode()
4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_update_ioa_ucode()
4072 * ipr_store_update_fw - Update the firmware on the adapter
4088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_store_update_fw()
4098 return -EACCES; in ipr_store_update_fw()
4106 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { in ipr_store_update_fw()
4107 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); in ipr_store_update_fw()
4108 return -EIO; in ipr_store_update_fw()
4111 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; in ipr_store_update_fw()
4113 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4114 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); in ipr_store_update_fw()
4118 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); in ipr_store_update_fw()
4120 return -ENOMEM; in ipr_store_update_fw()
4126 dev_err(&ioa_cfg->pdev->dev, in ipr_store_update_fw()
4152 * ipr_show_fw_type - Show the adapter's firmware type.
4164 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_show_fw_type()
4168 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4169 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64); in ipr_show_fw_type()
4170 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_fw_type()
4188 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_async_err_log()
4193 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4194 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_read_async_err_log()
4197 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4200 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam, in ipr_read_async_err_log()
4201 sizeof(hostrcb->hcam)); in ipr_read_async_err_log()
4202 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_async_err_log()
4212 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_next_async_err_log()
4216 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4217 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q, in ipr_next_async_err_log()
4220 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4225 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_next_async_err_log()
4226 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_next_async_err_log()
4254 * ipr_read_dump - Dump the adapter
4271 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_read_dump()
4279 return -EACCES; in ipr_read_dump()
4281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4282 dump = ioa_cfg->dump; in ipr_read_dump()
4284 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) { in ipr_read_dump()
4285 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4288 kref_get(&dump->kref); in ipr_read_dump()
4289 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_read_dump()
4291 if (off > dump->driver_dump.hdr.len) { in ipr_read_dump()
4292 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
4296 if (off + count > dump->driver_dump.hdr.len) { in ipr_read_dump()
4297 count = dump->driver_dump.hdr.len - off; in ipr_read_dump()
4301 if (count && off < sizeof(dump->driver_dump)) { in ipr_read_dump()
4302 if (off + count > sizeof(dump->driver_dump)) in ipr_read_dump()
4303 len = sizeof(dump->driver_dump) - off; in ipr_read_dump()
4306 src = (u8 *)&dump->driver_dump + off; in ipr_read_dump()
4310 count -= len; in ipr_read_dump()
4313 off -= sizeof(dump->driver_dump); in ipr_read_dump()
4315 if (ioa_cfg->sis64) in ipr_read_dump()
4317 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) * in ipr_read_dump()
4325 len = sdt_end - off; in ipr_read_dump()
4328 src = (u8 *)&dump->ioa_dump + off; in ipr_read_dump()
4332 count -= len; in ipr_read_dump()
4335 off -= sdt_end; in ipr_read_dump()
4339 len = PAGE_ALIGN(off) - off; in ipr_read_dump()
4342 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; in ipr_read_dump()
4347 count -= len; in ipr_read_dump()
4350 kref_put(&dump->kref, ipr_release_dump); in ipr_read_dump()
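
ipr_read_dump() serves one flat byte range out of three backing regions in sequence: the driver-dump header block, the SDT area, then page-sized chunks of ioa_data. Each step clamps the copy to what the current region holds and rebases off before moving on. A two-region model of that windowing:

#include <stdio.h>
#include <string.h>

static size_t window_read(char *dst, size_t count, size_t *off,
                          const char *src, size_t src_len)
{
    size_t len;

    if (*off >= src_len) {           /* range starts past this region */
        *off -= src_len;             /* rebase for the next region */
        return 0;
    }
    len = count < src_len - *off ? count : src_len - *off;
    memcpy(dst, src + *off, len);
    *off = 0;                        /* next region starts fresh */
    return len;
}

int main(void)
{
    const char a[] = "HEADER-";      /* region 1 */
    const char b[] = "PAYLOAD";      /* region 2 */
    char out[16] = { 0 };
    size_t off = 4, count = 8, done = 0;

    done += window_read(out + done, count - done, &off, a, 7);
    done += window_read(out + done, count - done, &off, b, 7);
    printf("read %zu bytes: %s\n", done, out);   /* "ER-PAYLO" */
    return 0;
}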
4355 * ipr_alloc_dump - Prepare for adapter dump
4371 return -ENOMEM; in ipr_alloc_dump()
4374 if (ioa_cfg->sis64) in ipr_alloc_dump()
4384 return -ENOMEM; in ipr_alloc_dump()
4387 dump->ioa_dump.ioa_data = ioa_data; in ipr_alloc_dump()
4389 kref_init(&dump->kref); in ipr_alloc_dump()
4390 dump->ioa_cfg = ioa_cfg; in ipr_alloc_dump()
4392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4394 if (INACTIVE != ioa_cfg->sdt_state) { in ipr_alloc_dump()
4395 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4396 vfree(dump->ioa_dump.ioa_data); in ipr_alloc_dump()
4401 ioa_cfg->dump = dump; in ipr_alloc_dump()
4402 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_alloc_dump()
4403 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) { in ipr_alloc_dump()
4404 ioa_cfg->dump_taken = 1; in ipr_alloc_dump()
4405 schedule_work(&ioa_cfg->work_q); in ipr_alloc_dump()
4407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_alloc_dump()
4413 * ipr_free_dump - Free adapter dump memory
4426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4427 dump = ioa_cfg->dump; in ipr_free_dump()
4429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4433 ioa_cfg->dump = NULL; in ipr_free_dump()
4434 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_free_dump()
4436 kref_put(&dump->kref, ipr_release_dump); in ipr_free_dump()
4443 * ipr_write_dump - Setup dump state of adapter
4460 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_write_dump()
4464 return -EACCES; in ipr_write_dump()
4471 return -EINVAL; in ipr_write_dump()
4493 * ipr_change_queue_depth - Change the device's queue depth
4502 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_change_queue_depth()
4506 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4507 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_change_queue_depth()
4511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_change_queue_depth()
4514 return sdev->queue_depth; in ipr_change_queue_depth()
4518 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4529 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_adapter_handle()
4532 ssize_t len = -ENXIO; in ipr_show_adapter_handle()
4534 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4535 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_adapter_handle()
4537 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); in ipr_show_adapter_handle()
4538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_adapter_handle()
4551 * ipr_show_resource_path - Show the resource path or the resource address for
4563 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_path()
4566 ssize_t len = -ENXIO; in ipr_show_resource_path()
4569 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4570 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_path()
4571 if (res && ioa_cfg->sis64) in ipr_show_resource_path()
4573 __ipr_format_res_path(res->res_path, buffer, in ipr_show_resource_path()
4576 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no, in ipr_show_resource_path()
4577 res->bus, res->target, res->lun); in ipr_show_resource_path()
4579 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_path()
4592 * ipr_show_device_id - Show the device_id for this device.
4603 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_device_id()
4606 ssize_t len = -ENXIO; in ipr_show_device_id()
4608 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4609 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_device_id()
4610 if (res && ioa_cfg->sis64) in ipr_show_device_id()
4611 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id)); in ipr_show_device_id()
4613 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); in ipr_show_device_id()
4615 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_device_id()
4628 * ipr_show_resource_type - Show the resource type for this device.
4639 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_resource_type()
4642 ssize_t len = -ENXIO; in ipr_show_resource_type()
4644 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4645 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_resource_type()
4648 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type); in ipr_show_resource_type()
4650 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_resource_type()
4663 * ipr_show_raw_mode - Show the adapter's raw mode
4675 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_show_raw_mode()
4680 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4681 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_show_raw_mode()
4683 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode); in ipr_show_raw_mode()
4685 len = -ENXIO; in ipr_show_raw_mode()
4686 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_show_raw_mode()
4691 * ipr_store_raw_mode - Change the adapter's raw mode
4705 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; in ipr_store_raw_mode()
4710 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4711 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_store_raw_mode()
4714 res->raw_mode = simple_strtoul(buf, NULL, 10); in ipr_store_raw_mode()
4716 if (res->sdev) in ipr_store_raw_mode()
4717 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n", in ipr_store_raw_mode()
4718 res->raw_mode ? "enabled" : "disabled"); in ipr_store_raw_mode()
4720 len = -EINVAL; in ipr_store_raw_mode()
4722 len = -ENXIO; in ipr_store_raw_mode()
4723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_store_raw_mode()
4746 * ipr_biosparam - Return the HSC mapping
4781 * ipr_find_starget - Find target based on bus/target.
4789 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_find_starget()
4790 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_find_starget()
4793 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_starget()
4794 if ((res->bus == starget->channel) && in ipr_find_starget()
4795 (res->target == starget->id)) { in ipr_find_starget()
4806 * ipr_target_alloc - Prepare for commands to a SCSI target
4813 * 0 on success / non-0 on failure
4817 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_alloc()
4818 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_alloc()
4824 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4826 starget->hostdata = NULL; in ipr_target_alloc()
4829 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4832 return -ENOMEM; in ipr_target_alloc()
4834 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost); in ipr_target_alloc()
4836 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4837 sata_port->ioa_cfg = ioa_cfg; in ipr_target_alloc()
4838 sata_port->ap = ap; in ipr_target_alloc()
4839 sata_port->res = res; in ipr_target_alloc()
4841 res->sata_port = sata_port; in ipr_target_alloc()
4842 ap->private_data = sata_port; in ipr_target_alloc()
4843 starget->hostdata = sata_port; in ipr_target_alloc()
4846 return -ENOMEM; in ipr_target_alloc()
4849 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_target_alloc()
4855 * ipr_target_destroy - Destroy a SCSI target
4864 struct ipr_sata_port *sata_port = starget->hostdata; in ipr_target_destroy()
4865 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in ipr_target_destroy()
4866 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_target_destroy()
4868 if (ioa_cfg->sis64) { in ipr_target_destroy()
4870 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) in ipr_target_destroy()
4871 clear_bit(starget->id, ioa_cfg->array_ids); in ipr_target_destroy()
4872 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) in ipr_target_destroy()
4873 clear_bit(starget->id, ioa_cfg->vset_ids); in ipr_target_destroy()
4874 else if (starget->channel == 0) in ipr_target_destroy()
4875 clear_bit(starget->id, ioa_cfg->target_ids); in ipr_target_destroy()
4880 starget->hostdata = NULL; in ipr_target_destroy()
4881 ata_sas_port_destroy(sata_port->ap); in ipr_target_destroy()
4887 * ipr_find_sdev - Find device based on bus/target/lun.
4895 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_find_sdev()
4898 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_find_sdev()
4899 if ((res->bus == sdev->channel) && in ipr_find_sdev()
4900 (res->target == sdev->id) && in ipr_find_sdev()
4901 (res->lun == sdev->lun)) in ipr_find_sdev()
4909 * ipr_slave_destroy - Unconfigure a SCSI device
4921 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_destroy()
4923 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4924 res = (struct ipr_resource_entry *) sdev->hostdata; in ipr_slave_destroy()
4926 if (res->sata_port) in ipr_slave_destroy()
4927 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE; in ipr_slave_destroy()
4928 sdev->hostdata = NULL; in ipr_slave_destroy()
4929 res->sdev = NULL; in ipr_slave_destroy()
4930 res->sata_port = NULL; in ipr_slave_destroy()
4932 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_destroy()
4936 * ipr_slave_configure - Configure a SCSI device
4946 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_configure()
4952 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4953 res = sdev->hostdata; in ipr_slave_configure()
4956 sdev->type = TYPE_RAID; in ipr_slave_configure()
4958 sdev->scsi_level = 4; in ipr_slave_configure()
4959 sdev->no_uld_attach = 1; in ipr_slave_configure()
4962 sdev->scsi_level = SCSI_SPC_3; in ipr_slave_configure()
4963 sdev->no_report_opcodes = 1; in ipr_slave_configure()
4964 blk_queue_rq_timeout(sdev->request_queue, in ipr_slave_configure()
4966 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); in ipr_slave_configure()
4968 if (ipr_is_gata(res) && res->sata_port) in ipr_slave_configure()
4969 ap = res->sata_port->ap; in ipr_slave_configure()
4970 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4977 if (ioa_cfg->sis64) in ipr_slave_configure()
4980 res->res_path, buffer, sizeof(buffer))); in ipr_slave_configure()
4983 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_configure()
4988 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
5000 int rc = -ENXIO; in ipr_ata_slave_alloc()
5003 if (sdev->sdev_target) in ipr_ata_slave_alloc()
5004 sata_port = sdev->sdev_target->hostdata; in ipr_ata_slave_alloc()
5006 rc = ata_sas_port_init(sata_port->ap); in ipr_ata_slave_alloc()
5008 rc = ata_sas_sync_probe(sata_port->ap); in ipr_ata_slave_alloc()
5019 * ipr_slave_alloc - Prepare for commands to a device.
5024 * can then use this pointer in ipr_queuecommand when
5028 * 0 on success / -ENXIO if device does not exist
5032 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; in ipr_slave_alloc()
5035 int rc = -ENXIO; in ipr_slave_alloc()
5037 sdev->hostdata = NULL; in ipr_slave_alloc()
5039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5043 res->sdev = sdev; in ipr_slave_alloc()
5044 res->add_to_ml = 0; in ipr_slave_alloc()
5045 res->in_erp = 0; in ipr_slave_alloc()
5046 sdev->hostdata = res; in ipr_slave_alloc()
5048 res->needs_sync_complete = 1; in ipr_slave_alloc()
5051 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5056 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_slave_alloc()
5062 * ipr_match_lun - Match function for specified LUN
5071 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) in ipr_match_lun()
5077 * ipr_cmnd_is_free - Check if a command is free or not
5087 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { in ipr_cmnd_is_free()
5096 * ipr_match_res - Match function for specified resource entry
5107 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) in ipr_match_res()
5113 * ipr_wait_for_ops - Wait for matching commands to complete
5116 * @match: match function to use
5136 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5137 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5138 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5141 ipr_cmd->eh_comp = &comp; in ipr_wait_for_ops()
5146 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5156 spin_lock_irqsave(hrrq->lock, flags); in ipr_wait_for_ops()
5157 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_wait_for_ops()
5158 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in ipr_wait_for_ops()
5161 ipr_cmd->eh_comp = NULL; in ipr_wait_for_ops()
5166 spin_unlock_irqrestore(hrrq->lock, flags); in ipr_wait_for_ops()
5170 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); in ipr_wait_for_ops()
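
ipr_wait_for_ops() makes two passes per iteration: one to attach a completion to every outstanding command the match function accepts, one to detach after the wait, and it loops until a pass finds no matches. A single-threaded model of the scan-and-match skeleton, where completing an op is simulated by marking its slot free:

#include <stdbool.h>
#include <stdio.h>

struct demo_cmd { int device; bool free; };

static bool match(struct demo_cmd *c, int device)
{
    return !c->free && c->device == device;
}

int main(void)
{
    struct demo_cmd cmds[4] = {
        { 1, false }, { 2, false }, { 1, true }, { 1, false },
    };
    int device = 1, wait = 1, pass = 0;

    while (wait) {
        wait = 0;
        for (int i = 0; i < 4; i++) {
            if (match(&cmds[i], device)) {
                wait = 1;            /* would wait_for_completion() here */
                cmds[i].free = true; /* pretend the op completed */
            }
        }
        printf("pass %d: %s\n", ++pass,
               wait ? "waited on matching ops" : "all matching ops done");
    }
    return 0;
}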
5188 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_host_reset()
5189 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5191 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5193 dev_err(&ioa_cfg->pdev->dev, in ipr_eh_host_reset()
5196 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_eh_host_reset()
5197 ioa_cfg->sdt_state = GET_DUMP; in ipr_eh_host_reset()
5200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5201 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_eh_host_reset()
5202 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5206 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_eh_host_reset()
5211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_eh_host_reset()
5217 * ipr_device_reset - Reset the device
5228 * 0 on success / non-zero on failure
5241 ioarcb = &ipr_cmd->ioarcb; in ipr_device_reset()
5242 cmd_pkt = &ioarcb->cmd_pkt; in ipr_device_reset()
5244 if (ipr_cmd->ioa_cfg->sis64) { in ipr_device_reset()
5245 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_device_reset()
5246 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_device_reset()
5248 regs = &ioarcb->u.add_data.u.regs; in ipr_device_reset()
5250 ioarcb->res_handle = res->res_handle; in ipr_device_reset()
5251 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_device_reset()
5252 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_device_reset()
5254 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; in ipr_device_reset()
5255 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); in ipr_device_reset()
5256 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_device_reset()
5260 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_device_reset()
5261 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_device_reset()
5262 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { in ipr_device_reset()
5263 if (ipr_cmd->ioa_cfg->sis64) in ipr_device_reset()
5264 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_device_reset()
5267 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_device_reset()
5272 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0; in ipr_device_reset()
5276 * ipr_sata_reset - Reset the SATA port
5284 * 0 on success / non-zero on failure
5289 struct ipr_sata_port *sata_port = link->ap->private_data; in ipr_sata_reset()
5290 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_sata_reset()
5293 int rc = -ENXIO, ret; in ipr_sata_reset()
5296 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5297 while (ioa_cfg->in_reset_reload) { in ipr_sata_reset()
5298 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5299 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5300 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5303 res = sata_port->res; in ipr_sata_reset()
5306 *classes = res->ata_class; in ipr_sata_reset()
5307 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5315 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_sata_reset()
5318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_sata_reset()
5325 * __ipr_eh_dev_reset - Reset the device
5345 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in __ipr_eh_dev_reset()
5346 res = scsi_cmd->device->hostdata; in __ipr_eh_dev_reset()
5350 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the in __ipr_eh_dev_reset()
5353 if (ioa_cfg->in_reset_reload) in __ipr_eh_dev_reset()
5355 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in __ipr_eh_dev_reset()
5359 spin_lock(&hrrq->_lock); in __ipr_eh_dev_reset()
5360 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in __ipr_eh_dev_reset()
5361 ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; in __ipr_eh_dev_reset()
5363 if (ipr_cmd->ioarcb.res_handle == res->res_handle) { in __ipr_eh_dev_reset()
5364 if (!ipr_cmd->qc) in __ipr_eh_dev_reset()
5369 ipr_cmd->done = ipr_sata_eh_done; in __ipr_eh_dev_reset()
5370 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { in __ipr_eh_dev_reset()
5371 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; in __ipr_eh_dev_reset()
5372 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; in __ipr_eh_dev_reset()
5376 spin_unlock(&hrrq->_lock); in __ipr_eh_dev_reset()
5378 res->resetting_device = 1; in __ipr_eh_dev_reset()
5381 if (ipr_is_gata(res) && res->sata_port) { in __ipr_eh_dev_reset()
5382 ap = res->sata_port->ap; in __ipr_eh_dev_reset()
5383 spin_unlock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5385 spin_lock_irq(scsi_cmd->device->host->host_lock); in __ipr_eh_dev_reset()
5388 res->resetting_device = 0; in __ipr_eh_dev_reset()
5389 res->reset_occurred = 1; in __ipr_eh_dev_reset()
5401 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; in ipr_eh_dev_reset()
5402 res = cmd->device->hostdata; in ipr_eh_dev_reset()
5407 spin_lock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5409 spin_unlock_irq(cmd->device->host->host_lock); in ipr_eh_dev_reset()
5412 if (ipr_is_gata(res) && res->sata_port) in ipr_eh_dev_reset()
5415 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); in ipr_eh_dev_reset()
5422 * ipr_bus_reset_done - Op done function for bus reset.
5432 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_bus_reset_done()
5436 if (!ioa_cfg->sis64) in ipr_bus_reset_done()
5437 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_bus_reset_done()
5438 if (res->res_handle == ipr_cmd->ioarcb.res_handle) { in ipr_bus_reset_done()
5439 scsi_report_bus_reset(ioa_cfg->host, res->bus); in ipr_bus_reset_done()
5448 if (ipr_cmd->sibling->sibling) in ipr_bus_reset_done()
5449 ipr_cmd->sibling->sibling = NULL; in ipr_bus_reset_done()
5451 ipr_cmd->sibling->done(ipr_cmd->sibling); in ipr_bus_reset_done()
5453 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_bus_reset_done()
5458 * ipr_abort_timeout - An abort task has timed out
5472 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_abort_timeout()
5477 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5478 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { in ipr_abort_timeout()
5479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5483 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n"); in ipr_abort_timeout()
5485 ipr_cmd->sibling = reset_cmd; in ipr_abort_timeout()
5486 reset_cmd->sibling = ipr_cmd; in ipr_abort_timeout()
5487 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; in ipr_abort_timeout()
5488 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; in ipr_abort_timeout()
5489 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_abort_timeout()
5490 cmd_pkt->cdb[0] = IPR_RESET_DEVICE; in ipr_abort_timeout()
5491 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; in ipr_abort_timeout()
5494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_abort_timeout()
5499 * ipr_cancel_op - Cancel specified op
5518 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; in ipr_cancel_op()
5519 res = scsi_cmd->device->hostdata; in ipr_cancel_op()
5522 * This will force the mid-layer to call ipr_eh_host_reset, in ipr_cancel_op()
5525 if (ioa_cfg->in_reset_reload || in ipr_cancel_op()
5526 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_cancel_op()
5536 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_cancel_op()
5542 spin_lock(&hrrq->_lock); in ipr_cancel_op()
5543 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { in ipr_cancel_op()
5544 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { in ipr_cancel_op()
5545 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { in ipr_cancel_op()
5551 spin_unlock(&hrrq->_lock); in ipr_cancel_op()
5558 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_cancel_op()
5559 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_cancel_op()
5560 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_cancel_op()
5561 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_cancel_op()
5562 ipr_cmd->u.sdev = scsi_cmd->device; in ipr_cancel_op()
5565 scsi_cmd->cmnd[0]); in ipr_cancel_op()
5567 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_cancel_op()
5578 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_cancel_op()
5580 res->needs_sync_complete = 1; in ipr_cancel_op()
5587 * ipr_scan_finished - Report whether the device scan is done
5597 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; in ipr_scan_finished()
5600 spin_lock_irqsave(shost->host_lock, lock_flags); in ipr_scan_finished()
5601 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done) in ipr_scan_finished()
5603 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2)) in ipr_scan_finished()
5605 spin_unlock_irqrestore(shost->host_lock, lock_flags); in ipr_scan_finished()
5610 * ipr_eh_abort - Abort a single op
5624 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; in ipr_eh_abort()
5626 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5628 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); in ipr_eh_abort()
5631 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); in ipr_eh_abort()
5637 * ipr_handle_other_interrupt - Handle "other" interrupts
5650 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_handle_other_interrupt()
5657 if (ioa_cfg->sis64) { in ipr_handle_other_interrupt()
5658 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_handle_other_interrupt()
5659 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5663 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); in ipr_handle_other_interrupt()
5664 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; in ipr_handle_other_interrupt()
5665 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5666 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5667 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5677 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_handle_other_interrupt()
5678 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_handle_other_interrupt()
5680 list_del(&ioa_cfg->reset_cmd->queue); in ipr_handle_other_interrupt()
5681 del_timer(&ioa_cfg->reset_cmd->timer); in ipr_handle_other_interrupt()
5682 ipr_reset_ioa_job(ioa_cfg->reset_cmd); in ipr_handle_other_interrupt()
5684 if (ioa_cfg->clear_isr) { in ipr_handle_other_interrupt()
5686 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5688 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); in ipr_handle_other_interrupt()
5689 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_handle_other_interrupt()
5694 ioa_cfg->ioa_unit_checked = 1; in ipr_handle_other_interrupt()
5696 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5699 dev_err(&ioa_cfg->pdev->dev, in ipr_handle_other_interrupt()
5702 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_handle_other_interrupt()
5703 ioa_cfg->sdt_state = GET_DUMP; in ipr_handle_other_interrupt()
5713 * ipr_isr_eh - Interrupt service routine error handler
5723 ioa_cfg->errors_logged++; in ipr_isr_eh()
5724 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number); in ipr_isr_eh()
5726 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) in ipr_isr_eh()
5727 ioa_cfg->sdt_state = GET_DUMP; in ipr_isr_eh()
5738 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg; in ipr_process_hrrq()
5742 if (!hrr_queue->allow_interrupts) in ipr_process_hrrq()
5745 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_process_hrrq()
5746 hrr_queue->toggle_bit) { in ipr_process_hrrq()
5748 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) & in ipr_process_hrrq()
5752 if (unlikely(cmd_index > hrr_queue->max_cmd_id || in ipr_process_hrrq()
5753 cmd_index < hrr_queue->min_cmd_id)) { in ipr_process_hrrq()
5760 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; in ipr_process_hrrq()
5761 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_process_hrrq()
5765 list_move_tail(&ipr_cmd->queue, doneq); in ipr_process_hrrq()
5767 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) { in ipr_process_hrrq()
5768 hrr_queue->hrrq_curr++; in ipr_process_hrrq()
5770 hrr_queue->hrrq_curr = hrr_queue->hrrq_start; in ipr_process_hrrq()
5771 hrr_queue->toggle_bit ^= 1u; in ipr_process_hrrq()
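
The loop above is the host side of a toggle-bit ring: an entry belongs to software only while its toggle bit equals hrr_queue->toggle_bit, and the expected value flips each time hrrq_curr wraps, so stale entries from the previous lap are never re-consumed. A self-contained model of both sides of that protocol:

#include <stdint.h>
#include <stdio.h>

#define QSZ 4
#define TOGGLE 0x1u

static uint32_t q[QSZ];
static unsigned prod, cons;
static uint32_t prod_toggle = 1, cons_toggle = 1;

static void post(uint32_t id)            /* adapter side */
{
    q[prod] = (id << 1) | prod_toggle;
    if (++prod == QSZ) { prod = 0; prod_toggle ^= TOGGLE; }
}

static void drain(void)                  /* host side */
{
    while ((q[cons] & TOGGLE) == cons_toggle) {
        printf("completed response id=%u\n", q[cons] >> 1);
        if (++cons == QSZ) { cons = 0; cons_toggle ^= TOGGLE; }
    }
}

int main(void)
{
    for (uint32_t id = 0; id < 4; id++)
        post(id);
    drain();     /* ids 0-3; consumer wraps and flips its expectation */
    post(4);
    post(5);
    drain();     /* ids 4-5 accepted under the flipped toggle value */
    return 0;
}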
5791 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_iopoll()
5796 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_iopoll()
5799 list_del(&ipr_cmd->queue); in ipr_iopoll()
5800 del_timer(&ipr_cmd->timer); in ipr_iopoll()
5801 ipr_cmd->fast_done(ipr_cmd); in ipr_iopoll()
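
ipr_iopoll() is budget-driven: it handles at most `budget` completions per callback and calls irq_poll_complete() to leave polling mode (re-enabling the interrupt) only when it finds fewer than that. A minimal model of the budget decision:

#include <stdio.h>

static int process_queue(int pending, int budget)
{
    return pending < budget ? pending : budget;
}

int main(void)
{
    int pending = 10, budget = 4;

    while (pending) {
        int done = process_queue(pending, budget);

        pending -= done;
        if (done < budget)
            printf("did %d < budget: complete poll, re-enable irq\n", done);
        else
            printf("did %d == budget: stay in polling mode\n", done);
    }
    return 0;
}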
5808 * ipr_isr - Interrupt service routine
5818 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr()
5827 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr()
5829 if (!hrrq->allow_interrupts) { in ipr_isr()
5830 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5835 if (ipr_process_hrrq(hrrq, -1, &doneq)) { in ipr_isr()
5838 if (!ioa_cfg->clear_isr) in ipr_isr()
5845 ioa_cfg->regs.clr_interrupt_reg32); in ipr_isr()
5846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5851 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_isr()
5866 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr()
5868 list_del(&ipr_cmd->queue); in ipr_isr()
5869 del_timer(&ipr_cmd->timer); in ipr_isr()
5870 ipr_cmd->fast_done(ipr_cmd); in ipr_isr()
5876 * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQs
5886 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg; in ipr_isr_mhrrq()
5892 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5895 if (!hrrq->allow_interrupts) { in ipr_isr_mhrrq()
5896 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5900 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_isr_mhrrq()
5901 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5902 hrrq->toggle_bit) { in ipr_isr_mhrrq()
5903 irq_poll_sched(&hrrq->iopoll); in ipr_isr_mhrrq()
5904 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5908 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == in ipr_isr_mhrrq()
5909 hrrq->toggle_bit) in ipr_isr_mhrrq()
5911 if (ipr_process_hrrq(hrrq, -1, &doneq)) in ipr_isr_mhrrq()
5915 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_isr_mhrrq()
5918 list_del(&ipr_cmd->queue); in ipr_isr_mhrrq()
5919 del_timer(&ipr_cmd->timer); in ipr_isr_mhrrq()
5920 ipr_cmd->fast_done(ipr_cmd); in ipr_isr_mhrrq()
5926 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5931 * 0 on success / -1 on failure
5940 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl64()
5941 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl64()
5942 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; in ipr_build_ioadl64()
5951 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl64()
5952 return -1; in ipr_build_ioadl64()
5955 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl64()
5957 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl64()
5958 ioarcb->ioadl_len = in ipr_build_ioadl64()
5959 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl64()
5961 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl64()
5963 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl64()
5964 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) in ipr_build_ioadl64()
5967 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl64()
5973 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl64()
5978 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5983 * 0 on success / -1 on failure
5992 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_build_ioadl()
5993 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioadl()
5994 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ioadl()
6002 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n"); in ipr_build_ioadl()
6003 return -1; in ipr_build_ioadl()
6006 ipr_cmd->dma_use_sg = nseg; in ipr_build_ioadl()
6008 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { in ipr_build_ioadl()
6010 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ioadl()
6011 ioarcb->data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6012 ioarcb->ioadl_len = in ipr_build_ioadl()
6013 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6014 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { in ipr_build_ioadl()
6016 ioarcb->read_data_transfer_length = cpu_to_be32(length); in ipr_build_ioadl()
6017 ioarcb->read_ioadl_len = in ipr_build_ioadl()
6018 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ioadl()
6021 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { in ipr_build_ioadl()
6022 ioadl = ioarcb->u.add_data.u.ioadl; in ipr_build_ioadl()
6023 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + in ipr_build_ioadl()
6025 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_build_ioadl()
6028 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) { in ipr_build_ioadl()
6034 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ioadl()
6039 * __ipr_erp_done - Process completion of ERP for a device
6050 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in __ipr_erp_done()
6051 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in __ipr_erp_done()
6052 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_done()
6055 scsi_cmd->result |= (DID_ERROR << 16); in __ipr_erp_done()
6059 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, in __ipr_erp_done()
6065 res->needs_sync_complete = 1; in __ipr_erp_done()
6066 res->in_erp = 0; in __ipr_erp_done()
6068 scsi_dma_unmap(ipr_cmd->scsi_cmd); in __ipr_erp_done()
6069 scsi_cmd->scsi_done(scsi_cmd); in __ipr_erp_done()
6070 if (ipr_cmd->eh_comp) in __ipr_erp_done()
6071 complete(ipr_cmd->eh_comp); in __ipr_erp_done()
6072 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in __ipr_erp_done()
6076 * ipr_erp_done - Process completion of ERP for a device
6087 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_done()
6090 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6092 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_done()
6096 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6104 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_reinit_ipr_cmnd_for_erp()
6105 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_reinit_ipr_cmnd_for_erp()
6106 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_reinit_ipr_cmnd_for_erp()
6108 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); in ipr_reinit_ipr_cmnd_for_erp()
6109 ioarcb->data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6110 ioarcb->read_data_transfer_length = 0; in ipr_reinit_ipr_cmnd_for_erp()
6111 ioarcb->ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6112 ioarcb->read_ioadl_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6113 ioasa->hdr.ioasc = 0; in ipr_reinit_ipr_cmnd_for_erp()
6114 ioasa->hdr.residual_data_len = 0; in ipr_reinit_ipr_cmnd_for_erp()
6116 if (ipr_cmd->ioa_cfg->sis64) in ipr_reinit_ipr_cmnd_for_erp()
6117 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6120 ioarcb->write_ioadl_addr = in ipr_reinit_ipr_cmnd_for_erp()
6122 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_reinit_ipr_cmnd_for_erp()
6127 * __ipr_erp_request_sense - Send request sense to a device
6138 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in __ipr_erp_request_sense()
6139 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in __ipr_erp_request_sense()
6148 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; in __ipr_erp_request_sense()
6149 cmd_pkt->cdb[0] = REQUEST_SENSE; in __ipr_erp_request_sense()
6150 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; in __ipr_erp_request_sense()
6151 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; in __ipr_erp_request_sense()
6152 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in __ipr_erp_request_sense()
6153 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); in __ipr_erp_request_sense()
6155 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, in __ipr_erp_request_sense()
6163 * ipr_erp_request_sense - Send request sense to a device
6174 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; in ipr_erp_request_sense()
6177 spin_lock_irqsave(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
6179 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); in ipr_erp_request_sense()
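
__ipr_erp_request_sense() above is mostly CDB plumbing: a 6-byte REQUEST SENSE with the allocation length in byte 4, pointed at the per-command sense buffer. The CDB itself is trivial to build:

#include <stdio.h>

int main(void)
{
    unsigned char cdb[6] = { 0 };

    cdb[0] = 0x03;                   /* REQUEST SENSE opcode */
    cdb[4] = 96;                     /* SCSI_SENSE_BUFFERSIZE */

    printf("CDB:");
    for (int i = 0; i < 6; i++)
        printf(" %02x", cdb[i]);
    printf("\n");
    return 0;
}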
6183 * ipr_erp_cancel_all - Send cancel all to a device
6196 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_cancel_all()
6197 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_cancel_all()
6200 res->in_erp = 1; in ipr_erp_cancel_all()
6204 if (!scsi_cmd->device->simple_tags) { in ipr_erp_cancel_all()
6209 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_erp_cancel_all()
6210 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_erp_cancel_all()
6211 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; in ipr_erp_cancel_all()
6218 * ipr_dump_ioasa - Dump contents of IOASA
6236 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_dump_ioasa()
6240 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6241 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; in ipr_dump_ioasa()
6246 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) in ipr_dump_ioasa()
6254 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { in ipr_dump_ioasa()
6256 if (ioasa->hdr.ilid != 0) in ipr_dump_ioasa()
6268 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); in ipr_dump_ioasa()
6269 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) in ipr_dump_ioasa()
6271 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) in ipr_dump_ioasa()
6286 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6295 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; in ipr_gen_sense()
6296 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; in ipr_gen_sense()
6297 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_gen_sense()
6298 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); in ipr_gen_sense()
6305 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; in ipr_gen_sense()
6309 ioasa->u.vset.failing_lba_hi != 0) { in ipr_gen_sense()
6320 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); in ipr_gen_sense()
6327 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6341 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { in ipr_gen_sense()
6352 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; in ipr_gen_sense()
6355 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; in ipr_gen_sense()
6359 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); in ipr_gen_sense()
6361 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); in ipr_gen_sense()
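/*
 * Illustrative sketch only: building fixed-format (0x70) SCSI sense
 * data the way ipr_gen_sense() does for a medium error with a known
 * failing LBA. The VALID bit in byte 0 says the information field
 * (bytes 3-6) holds the big-endian LBA. The key/ASC/ASCQ values are
 * example codes, not the driver's translation tables.
 */
#include <stdint.h>
#include <string.h>

static void sketch_gen_fixed_sense(uint8_t *sense_buf, uint32_t failing_lba)
{
	memset(sense_buf, 0, 18);
	sense_buf[0] = 0xF0;            /* 0x70 (fixed format) | 0x80 (VALID) */
	sense_buf[2] = 0x03;            /* sense key: MEDIUM ERROR */
	sense_buf[3] = (failing_lba >> 24) & 0xff; /* information field: */
	sense_buf[4] = (failing_lba >> 16) & 0xff; /* big-endian failing LBA */
	sense_buf[5] = (failing_lba >> 8) & 0xff;
	sense_buf[6] = failing_lba & 0xff;
	sense_buf[7] = 10;              /* additional sense length */
	sense_buf[12] = 0x11;           /* ASC: unrecovered read error */
	sense_buf[13] = 0x00;           /* ASCQ */
}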
6376 * ipr_get_autosense - Copy autosense data to sense buffer
6387 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; in ipr_get_autosense()
6388 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; in ipr_get_autosense()
6390 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) in ipr_get_autosense()
6393 if (ipr_cmd->ioa_cfg->sis64) in ipr_get_autosense()
6394 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, in ipr_get_autosense()
6395 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), in ipr_get_autosense()
6398 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, in ipr_get_autosense()
6399 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), in ipr_get_autosense()
6405 * ipr_erp_start - Process an error response for a SCSI op
6418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_erp_start()
6419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; in ipr_erp_start()
6420 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_erp_start()
6436 scsi_cmd->result |= (DID_ABORT << 16); in ipr_erp_start()
6438 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6442 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6445 scsi_cmd->result |= (DID_NO_CONNECT << 16); in ipr_erp_start()
6447 res->needs_sync_complete = 1; in ipr_erp_start()
6450 if (!res->in_erp) in ipr_erp_start()
6451 res->needs_sync_complete = 1; in ipr_erp_start()
6452 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6458	 * Exception: do not set DID_PASSTHROUGH on CHECK CONDITION, so the SCSI mid-layer and upper layers handle it accordingly.	in ipr_erp_start()
6460 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) in ipr_erp_start()
6461 scsi_cmd->result |= (DID_PASSTHROUGH << 16); in ipr_erp_start()
6469 if (!res->resetting_device) in ipr_erp_start()
6470 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); in ipr_erp_start()
6471 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6473 res->needs_sync_complete = 1; in ipr_erp_start()
6476 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); in ipr_erp_start()
6486 res->needs_sync_complete = 1; in ipr_erp_start()
6491 if (res->raw_mode) { in ipr_erp_start()
6492 res->raw_mode = 0; in ipr_erp_start()
6493 scsi_cmd->result |= (DID_IMM_RETRY << 16); in ipr_erp_start()
6495 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6499 scsi_cmd->result |= (DID_ERROR << 16); in ipr_erp_start()
6501 res->needs_sync_complete = 1; in ipr_erp_start()
6505 scsi_dma_unmap(ipr_cmd->scsi_cmd); in ipr_erp_start()
6506 scsi_cmd->scsi_done(scsi_cmd); in ipr_erp_start()
6507 if (ipr_cmd->eh_comp) in ipr_erp_start()
6508 complete(ipr_cmd->eh_comp); in ipr_erp_start()
6509 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_erp_start()
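/*
 * Illustrative sketch only: the error-classification shape of
 * ipr_erp_start() above - mask the 32-bit IOASC down to its error
 * class, then translate each class into a mid-layer disposition.
 * The mask, class values, and dispositions are simplified stand-ins
 * for the IPR_IOASC_* constants and DID_* result codes.
 */
#include <stdint.h>

#define SKETCH_IOASC_MASK        0xffffff00u /* drop the low detail byte */
#define SKETCH_IOASC_ABORTED     0x0b000000u /* stand-in class values */
#define SKETCH_IOASC_NO_CONNECT  0x05000000u

enum sketch_disposition { SKETCH_RETRY, SKETCH_NO_CONNECT, SKETCH_ERROR };

static enum sketch_disposition sketch_classify_ioasc(uint32_t ioasc)
{
	switch (ioasc & SKETCH_IOASC_MASK) {
	case SKETCH_IOASC_ABORTED:
		return SKETCH_RETRY;       /* aborted by host: retry it */
	case SKETCH_IOASC_NO_CONNECT:
		return SKETCH_NO_CONNECT;  /* device gone: fail fast */
	default:
		return SKETCH_ERROR;       /* let upper layers recover */
	}
}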
6513 * ipr_scsi_done - mid-layer done function
6517	 * This function is invoked by the interrupt handler for ops generated by the SCSI mid-layer
6524 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_scsi_done()
6525 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; in ipr_scsi_done()
6526 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_scsi_done()
6529 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); in ipr_scsi_done()
6534 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6535 scsi_cmd->scsi_done(scsi_cmd); in ipr_scsi_done()
6536 if (ipr_cmd->eh_comp) in ipr_scsi_done()
6537 complete(ipr_cmd->eh_comp); in ipr_scsi_done()
6538 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_scsi_done()
6539 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); in ipr_scsi_done()
6541 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6542 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6544 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_scsi_done()
6545 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_scsi_done()
6550 * ipr_queuecommand - Queue a mid-layer request
6554 * This function queues a request generated by the mid-layer.
6573 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; in ipr_queuecommand()
6575 scsi_cmd->result = (DID_OK << 16); in ipr_queuecommand()
6576 res = scsi_cmd->device->hostdata; in ipr_queuecommand()
6578 if (ipr_is_gata(res) && res->sata_port) { in ipr_queuecommand()
6579 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6580 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); in ipr_queuecommand()
6581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_queuecommand()
6586 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_queuecommand()
6588 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6594 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) { in ipr_queuecommand()
6595 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6600 * FIXME - Create scsi_set_host_offline interface in ipr_queuecommand()
6603 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) { in ipr_queuecommand()
6604 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6610 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6616 ioarcb = &ipr_cmd->ioarcb; in ipr_queuecommand()
6618 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); in ipr_queuecommand()
6619 ipr_cmd->scsi_cmd = scsi_cmd; in ipr_queuecommand()
6620 ipr_cmd->done = ipr_scsi_eh_done; in ipr_queuecommand()
6623 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6624 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6626 if (res->reset_occurred) { in ipr_queuecommand()
6627 res->reset_occurred = 0; in ipr_queuecommand()
6628 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; in ipr_queuecommand()
6633 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_queuecommand()
6635 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; in ipr_queuecommand()
6636 if (scsi_cmd->flags & SCMD_TAGGED) in ipr_queuecommand()
6637 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK; in ipr_queuecommand()
6639 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK; in ipr_queuecommand()
6642 if (scsi_cmd->cmnd[0] >= 0xC0 && in ipr_queuecommand()
6643 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) { in ipr_queuecommand()
6644 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_queuecommand()
6646 if (res->raw_mode && ipr_is_af_dasd_device(res)) { in ipr_queuecommand()
6647 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE; in ipr_queuecommand()
6649 if (scsi_cmd->underflow == 0) in ipr_queuecommand()
6650 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_queuecommand()
6653 if (ioa_cfg->sis64) in ipr_queuecommand()
6658 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6659 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) { in ipr_queuecommand()
6660 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6661 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6667 if (unlikely(hrrq->ioa_is_dead)) { in ipr_queuecommand()
6668 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); in ipr_queuecommand()
6669 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6674 ioarcb->res_handle = res->res_handle; in ipr_queuecommand()
6675 if (res->needs_sync_complete) { in ipr_queuecommand()
6676 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; in ipr_queuecommand()
6677 res->needs_sync_complete = 0; in ipr_queuecommand()
6679 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q); in ipr_queuecommand()
6682 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6686 spin_lock_irqsave(hrrq->lock, hrrq_flags); in ipr_queuecommand()
6687 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); in ipr_queuecommand()
6688 scsi_cmd->result = (DID_NO_CONNECT << 16); in ipr_queuecommand()
6689 scsi_cmd->scsi_done(scsi_cmd); in ipr_queuecommand()
6690 spin_unlock_irqrestore(hrrq->lock, hrrq_flags); in ipr_queuecommand()
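/*
 * Illustrative sketch only: the lock dance in ipr_queuecommand() above.
 * The queue lock is dropped while the request is built, so after
 * re-acquiring it the driver must re-check that the adapter still
 * accepts commands before moving the block to the pending list.
 * Everything here is a simplified stand-in, including replacing the
 * free/pending lists with counters.
 */
#include <pthread.h>
#include <stdbool.h>

struct sketch_hrrq {
	pthread_mutex_t lock;
	bool allow_cmds;
	int free_cnt, pending_cnt;  /* stand-ins for the free/pending lists */
};

static int sketch_queuecommand(struct sketch_hrrq *hrrq)
{
	pthread_mutex_lock(&hrrq->lock);
	if (!hrrq->allow_cmds || hrrq->free_cnt == 0) {
		pthread_mutex_unlock(&hrrq->lock);
		return -1;              /* ask the caller to retry later */
	}
	hrrq->free_cnt--;               /* take a command block */
	pthread_mutex_unlock(&hrrq->lock);

	/* ...build the request without holding the lock... */

	pthread_mutex_lock(&hrrq->lock);
	if (!hrrq->allow_cmds) {        /* state may have changed meanwhile */
		hrrq->free_cnt++;       /* put the block back */
		pthread_mutex_unlock(&hrrq->lock);
		return -1;
	}
	hrrq->pending_cnt++;            /* hand it to the adapter */
	pthread_mutex_unlock(&hrrq->lock);
	return 0;
}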
6695 * ipr_ioctl - IOCTL handler
6708 res = (struct ipr_resource_entry *)sdev->hostdata; in ipr_ioctl()
6711 return -ENOTTY; in ipr_ioctl()
6712 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg); in ipr_ioctl()
6715 return -EINVAL; in ipr_ioctl()
6719 * ipr_ioa_info - Get information about the card/driver
6731 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; in ipr_ioa_info()
6733 spin_lock_irqsave(host->host_lock, lock_flags); in ipr_ioa_info()
6734 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); in ipr_ioa_info()
6735 spin_unlock_irqrestore(host->host_lock, lock_flags); in ipr_ioa_info()
6762 .this_id = -1,
6772 * ipr_ata_phy_reset - libata phy_reset handler
6779 struct ipr_sata_port *sata_port = ap->private_data; in ipr_ata_phy_reset()
6780 struct ipr_resource_entry *res = sata_port->res; in ipr_ata_phy_reset()
6781 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_phy_reset()
6785 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6786 while (ioa_cfg->in_reset_reload) { in ipr_ata_phy_reset()
6787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6788 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_phy_reset()
6789 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6792 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) in ipr_ata_phy_reset()
6798 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6802 ap->link.device[0].class = res->ata_class; in ipr_ata_phy_reset()
6803 if (ap->link.device[0].class == ATA_DEV_UNKNOWN) in ipr_ata_phy_reset()
6804 ap->link.device[0].class = ATA_DEV_NONE; in ipr_ata_phy_reset()
6807 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_phy_reset()
6812 * ipr_ata_post_internal - Cleanup after an internal command
6820 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_ata_post_internal()
6821 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_ata_post_internal()
6826 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6827 while (ioa_cfg->in_reset_reload) { in ipr_ata_post_internal()
6828 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6829 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_ata_post_internal()
6830 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6834 spin_lock(&hrrq->_lock); in ipr_ata_post_internal()
6835 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_ata_post_internal()
6836 if (ipr_cmd->qc == qc) { in ipr_ata_post_internal()
6837 ipr_device_reset(ioa_cfg, sata_port->res); in ipr_ata_post_internal()
6841 spin_unlock(&hrrq->_lock); in ipr_ata_post_internal()
6843 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_ata_post_internal()
6847 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6857 regs->feature = tf->feature; in ipr_copy_sata_tf()
6858 regs->nsect = tf->nsect; in ipr_copy_sata_tf()
6859 regs->lbal = tf->lbal; in ipr_copy_sata_tf()
6860 regs->lbam = tf->lbam; in ipr_copy_sata_tf()
6861 regs->lbah = tf->lbah; in ipr_copy_sata_tf()
6862 regs->device = tf->device; in ipr_copy_sata_tf()
6863 regs->command = tf->command; in ipr_copy_sata_tf()
6864 regs->hob_feature = tf->hob_feature; in ipr_copy_sata_tf()
6865 regs->hob_nsect = tf->hob_nsect; in ipr_copy_sata_tf()
6866 regs->hob_lbal = tf->hob_lbal; in ipr_copy_sata_tf()
6867 regs->hob_lbam = tf->hob_lbam; in ipr_copy_sata_tf()
6868 regs->hob_lbah = tf->hob_lbah; in ipr_copy_sata_tf()
6869 regs->ctl = tf->ctl; in ipr_copy_sata_tf()
6873 * ipr_sata_done - done function for SATA commands
6877	 * This function is invoked by the interrupt handler for ops generated by the SCSI mid-layer to SATA devices
6884 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_sata_done()
6885 struct ata_queued_cmd *qc = ipr_cmd->qc; in ipr_sata_done()
6886 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_sata_done()
6887 struct ipr_resource_entry *res = sata_port->res; in ipr_sata_done()
6888 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_sata_done()
6890 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6891 if (ipr_cmd->ioa_cfg->sis64) in ipr_sata_done()
6892 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, in ipr_sata_done()
6895 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, in ipr_sata_done()
6899 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) in ipr_sata_done()
6900 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); in ipr_sata_done()
6903 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6905 qc->err_mask |= ac_err_mask(sata_port->ioasa.status); in ipr_sata_done()
6906 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_sata_done()
6907 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_sata_done()
6912 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6921 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl64()
6922 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; in ipr_build_ata_ioadl64()
6924 int len = qc->nbytes; in ipr_build_ata_ioadl64()
6927 dma_addr_t dma_addr = ipr_cmd->dma_addr; in ipr_build_ata_ioadl64()
6932 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl64()
6934 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl64()
6935 } else if (qc->dma_dir == DMA_FROM_DEVICE) in ipr_build_ata_ioadl64()
6938 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl64()
6939 ioarcb->ioadl_len = in ipr_build_ata_ioadl64()
6940 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl64()
6941 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_build_ata_ioadl64()
6944 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl64()
6945 ioadl64->flags = cpu_to_be32(ioadl_flags); in ipr_build_ata_ioadl64()
6946 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); in ipr_build_ata_ioadl64()
6947 ioadl64->address = cpu_to_be64(sg_dma_address(sg)); in ipr_build_ata_ioadl64()
6954 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl64()
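/*
 * Illustrative sketch only: the wider SIS-64 scatter/gather element
 * built by ipr_build_ata_ioadl64() above. Unlike the 32-bit IOADL,
 * flags and length no longer share a word, and the address field is
 * 64 bits so buffers above 4GB can be described. Layout and flag
 * values are simplified stand-ins.
 */
#include <endian.h>
#include <stdint.h>

#define SKETCH_IOADL64_WRITE 0x68000000u /* stand-in direction flag */
#define SKETCH_IOADL64_LAST  0x80000000u /* stand-in "last element" bit */

struct sketch_ioadl64_desc {
	uint32_t flags;     /* big-endian flags word */
	uint32_t data_len;  /* big-endian element byte count */
	uint64_t address;   /* big-endian 64-bit DMA address */
};

static void sketch_build_ioadl64(struct sketch_ioadl64_desc *d,
				 const uint64_t *addrs,
				 const uint32_t *lens, int nseg)
{
	for (int i = 0; i < nseg; i++) {
		d[i].flags = htobe32(SKETCH_IOADL64_WRITE);
		d[i].data_len = htobe32(lens[i]);
		d[i].address = htobe64(addrs[i]);
	}
	/* Tag the final element, like IPR_IOADL_FLAGS_LAST above. */
	d[nseg - 1].flags |= htobe32(SKETCH_IOADL64_LAST);
}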
6958 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6967 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ata_ioadl()
6968 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; in ipr_build_ata_ioadl()
6970 int len = qc->nbytes; in ipr_build_ata_ioadl()
6977 if (qc->dma_dir == DMA_TO_DEVICE) { in ipr_build_ata_ioadl()
6979 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_ata_ioadl()
6980 ioarcb->data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6981 ioarcb->ioadl_len = in ipr_build_ata_ioadl()
6982 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6983 } else if (qc->dma_dir == DMA_FROM_DEVICE) { in ipr_build_ata_ioadl()
6985 ioarcb->read_data_transfer_length = cpu_to_be32(len); in ipr_build_ata_ioadl()
6986 ioarcb->read_ioadl_len = in ipr_build_ata_ioadl()
6987 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); in ipr_build_ata_ioadl()
6990 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ipr_build_ata_ioadl()
6991 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg)); in ipr_build_ata_ioadl()
6992 ioadl->address = cpu_to_be32(sg_dma_address(sg)); in ipr_build_ata_ioadl()
6999 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); in ipr_build_ata_ioadl()
7003 * ipr_qc_defer - Get a free ipr_cmd
7011 struct ata_port *ap = qc->ap; in ipr_qc_defer()
7012 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_defer()
7013 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_defer()
7019 hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_qc_defer()
7021 qc->lldd_task = NULL; in ipr_qc_defer()
7022 spin_lock(&hrrq->_lock); in ipr_qc_defer()
7023 if (unlikely(hrrq->ioa_is_dead)) { in ipr_qc_defer()
7024 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7028 if (unlikely(!hrrq->allow_cmds)) { in ipr_qc_defer()
7029 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7035 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7039 qc->lldd_task = ipr_cmd; in ipr_qc_defer()
7040 spin_unlock(&hrrq->_lock); in ipr_qc_defer()
7045 * ipr_qc_issue - Issue a SATA qc to a device
7053 struct ata_port *ap = qc->ap; in ipr_qc_issue()
7054 struct ipr_sata_port *sata_port = ap->private_data; in ipr_qc_issue()
7055 struct ipr_resource_entry *res = sata_port->res; in ipr_qc_issue()
7056 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; in ipr_qc_issue()
7061 if (qc->lldd_task == NULL) in ipr_qc_issue()
7064 ipr_cmd = qc->lldd_task; in ipr_qc_issue()
7068 qc->lldd_task = NULL; in ipr_qc_issue()
7069 spin_lock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7070 if (unlikely(!ipr_cmd->hrrq->allow_cmds || in ipr_qc_issue()
7071 ipr_cmd->hrrq->ioa_is_dead)) { in ipr_qc_issue()
7072 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_qc_issue()
7073 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7078 ioarcb = &ipr_cmd->ioarcb; in ipr_qc_issue()
7080 if (ioa_cfg->sis64) { in ipr_qc_issue()
7081 regs = &ipr_cmd->i.ata_ioadl.regs; in ipr_qc_issue()
7082 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); in ipr_qc_issue()
7084 regs = &ioarcb->u.add_data.u.regs; in ipr_qc_issue()
7087 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); in ipr_qc_issue()
7089 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_qc_issue()
7090 ipr_cmd->qc = qc; in ipr_qc_issue()
7091 ipr_cmd->done = ipr_sata_done; in ipr_qc_issue()
7092 ipr_cmd->ioarcb.res_handle = res->res_handle; in ipr_qc_issue()
7093 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; in ipr_qc_issue()
7094 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; in ipr_qc_issue()
7095 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; in ipr_qc_issue()
7096 ipr_cmd->dma_use_sg = qc->n_elem; in ipr_qc_issue()
7098 if (ioa_cfg->sis64) in ipr_qc_issue()
7103 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; in ipr_qc_issue()
7104 ipr_copy_sata_tf(regs, &qc->tf); in ipr_qc_issue()
7105 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); in ipr_qc_issue()
7108 switch (qc->tf.protocol) { in ipr_qc_issue()
7114 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7119 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7123 regs->flags |= IPR_ATA_FLAG_PACKET_CMD; in ipr_qc_issue()
7124 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA; in ipr_qc_issue()
7129 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7134 spin_unlock(&ipr_cmd->hrrq->_lock); in ipr_qc_issue()
7140 * ipr_qc_fill_rtf - Read result TF
7148 struct ipr_sata_port *sata_port = qc->ap->private_data; in ipr_qc_fill_rtf()
7149 struct ipr_ioasa_gata *g = &sata_port->ioasa; in ipr_qc_fill_rtf()
7150 struct ata_taskfile *tf = &qc->result_tf; in ipr_qc_fill_rtf()
7152 tf->feature = g->error; in ipr_qc_fill_rtf()
7153 tf->nsect = g->nsect; in ipr_qc_fill_rtf()
7154 tf->lbal = g->lbal; in ipr_qc_fill_rtf()
7155 tf->lbam = g->lbam; in ipr_qc_fill_rtf()
7156 tf->lbah = g->lbah; in ipr_qc_fill_rtf()
7157 tf->device = g->device; in ipr_qc_fill_rtf()
7158 tf->command = g->status; in ipr_qc_fill_rtf()
7159 tf->hob_nsect = g->hob_nsect; in ipr_qc_fill_rtf()
7160 tf->hob_lbal = g->hob_lbal; in ipr_qc_fill_rtf()
7161 tf->hob_lbam = g->hob_lbam; in ipr_qc_fill_rtf()
7162 tf->hob_lbah = g->hob_lbah; in ipr_qc_fill_rtf()
7201 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7204 * Adapters that use Gemstone revision < 3.1 do not work reliably on certain pSeries hardware. This function determines if the given adapter is in one of these configurations or not.
7215 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { in ipr_invalid_adapter()
7228 * ipr_ioa_bringdown_done - IOA bring down completion.
7239 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_bringdown_done()
7243 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_ioa_bringdown_done()
7245 ioa_cfg->scsi_unblock = 1; in ipr_ioa_bringdown_done()
7246 schedule_work(&ioa_cfg->work_q); in ipr_ioa_bringdown_done()
7249 ioa_cfg->in_reset_reload = 0; in ipr_ioa_bringdown_done()
7250 ioa_cfg->reset_retries = 0; in ipr_ioa_bringdown_done()
7251 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_ioa_bringdown_done()
7252 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7253 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_ioa_bringdown_done()
7254 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_ioa_bringdown_done()
7258 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_bringdown_done()
7259 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_bringdown_done()
7266 * ipr_ioa_reset_done - IOA reset completion.
7270 * It schedules any necessary mid-layer add/removes and wakes any reset sleepers.
7278 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioa_reset_done()
7283 ioa_cfg->in_reset_reload = 0; in ipr_ioa_reset_done()
7284 for (j = 0; j < ioa_cfg->hrrq_num; j++) { in ipr_ioa_reset_done()
7285 spin_lock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7286 ioa_cfg->hrrq[j].allow_cmds = 1; in ipr_ioa_reset_done()
7287 spin_unlock(&ioa_cfg->hrrq[j]._lock); in ipr_ioa_reset_done()
7290 ioa_cfg->reset_cmd = NULL; in ipr_ioa_reset_done()
7291 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; in ipr_ioa_reset_done()
7293 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { in ipr_ioa_reset_done()
7294 if (res->add_to_ml || res->del_from_ml) { in ipr_ioa_reset_done()
7299 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7302 list_del_init(&ioa_cfg->hostrcb[j]->queue); in ipr_ioa_reset_done()
7306 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7310 ioa_cfg->hostrcb[j]); in ipr_ioa_reset_done()
7313 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); in ipr_ioa_reset_done()
7314 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); in ipr_ioa_reset_done()
7316 ioa_cfg->reset_retries = 0; in ipr_ioa_reset_done()
7317 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_ioa_reset_done()
7318 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_ioa_reset_done()
7320 ioa_cfg->scsi_unblock = 1; in ipr_ioa_reset_done()
7321 schedule_work(&ioa_cfg->work_q); in ipr_ioa_reset_done()
7327 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7338 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); in ipr_set_sup_dev_dflt()
7339 supported_dev->num_records = 1; in ipr_set_sup_dev_dflt()
7340 supported_dev->data_length = in ipr_set_sup_dev_dflt()
7342 supported_dev->reserved = 0; in ipr_set_sup_dev_dflt()
7346 * ipr_set_supported_devs - Send Set Supported Devices for a device
7356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_set_supported_devs()
7357 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; in ipr_set_supported_devs()
7358 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_set_supported_devs()
7359 struct ipr_resource_entry *res = ipr_cmd->u.res; in ipr_set_supported_devs()
7361 ipr_cmd->job_step = ipr_ioa_reset_done; in ipr_set_supported_devs()
7363 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { in ipr_set_supported_devs()
7367 ipr_cmd->u.res = res; in ipr_set_supported_devs()
7368 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); in ipr_set_supported_devs()
7370 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_set_supported_devs()
7371 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_set_supported_devs()
7372 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_set_supported_devs()
7374 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7375 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; in ipr_set_supported_devs()
7376 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; in ipr_set_supported_devs()
7377 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; in ipr_set_supported_devs()
7380 ioa_cfg->vpd_cbs_dma + in ipr_set_supported_devs()
7388 if (!ioa_cfg->sis64) in ipr_set_supported_devs()
7389 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_set_supported_devs()
7399 * ipr_get_mode_page - Locate specified mode page
7414 if (!mode_pages || (mode_pages->hdr.length == 0)) in ipr_get_mode_page()
7417 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; in ipr_get_mode_page()
7419 (mode_pages->data + mode_pages->hdr.block_desc_len); in ipr_get_mode_page()
7423 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) in ipr_get_mode_page()
7428 mode_hdr->page_length); in ipr_get_mode_page()
7429 length -= page_length; in ipr_get_mode_page()
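/*
 * Illustrative sketch only: the MODE SENSE parse done by
 * ipr_get_mode_page() above - skip the 4-byte mode parameter header
 * and any block descriptors, then hop page to page using each page
 * header's length byte until the wanted page code is found. The
 * header shapes follow SPC, but the names are stand-ins.
 */
#include <stdint.h>
#include <stddef.h>

struct sketch_mode_page_hdr {
	uint8_t page_code;    /* low 6 bits hold the page code */
	uint8_t page_length;  /* bytes following this header */
};

static struct sketch_mode_page_hdr *
sketch_find_mode_page(uint8_t *data, size_t len, uint8_t block_desc_len,
		      uint8_t wanted)
{
	/* 4-byte mode parameter header, then the block descriptors. */
	size_t off = 4 + block_desc_len;

	while (off + sizeof(struct sketch_mode_page_hdr) <= len) {
		struct sketch_mode_page_hdr *hdr =
			(struct sketch_mode_page_hdr *)(data + off);
		if ((hdr->page_code & 0x3f) == wanted)
			return hdr;
		off += sizeof(*hdr) + hdr->page_length;
	}
	return NULL; /* page not present */
}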
7438 * ipr_check_term_power - Check for term power errors
7458 entry_length = mode_page->entry_length; in ipr_check_term_power()
7460 bus = mode_page->bus; in ipr_check_term_power()
7462 for (i = 0; i < mode_page->num_entries; i++) { in ipr_check_term_power()
7463 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { in ipr_check_term_power()
7464 dev_err(&ioa_cfg->pdev->dev, in ipr_check_term_power()
7466 bus->res_addr.bus); in ipr_check_term_power()
7474 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7491 ioa_cfg->bus_attr[i].bus_width); in ipr_scsi_bus_speed_limit()
7493 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) in ipr_scsi_bus_speed_limit()
7494 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; in ipr_scsi_bus_speed_limit()
7499 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7519 entry_length = mode_page->entry_length; in ipr_modify_ioafp_mode_page_28()
7522 for (i = 0, bus = mode_page->bus; in ipr_modify_ioafp_mode_page_28()
7523 i < mode_page->num_entries; in ipr_modify_ioafp_mode_page_28()
7525 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { in ipr_modify_ioafp_mode_page_28()
7526 dev_err(&ioa_cfg->pdev->dev, in ipr_modify_ioafp_mode_page_28()
7528 IPR_GET_PHYS_LOC(bus->res_addr)); in ipr_modify_ioafp_mode_page_28()
7532 bus_attr = &ioa_cfg->bus_attr[i]; in ipr_modify_ioafp_mode_page_28()
7533 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; in ipr_modify_ioafp_mode_page_28()
7534 bus->bus_width = bus_attr->bus_width; in ipr_modify_ioafp_mode_page_28()
7535 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); in ipr_modify_ioafp_mode_page_28()
7536 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; in ipr_modify_ioafp_mode_page_28()
7537 if (bus_attr->qas_enabled) in ipr_modify_ioafp_mode_page_28()
7538 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7540 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; in ipr_modify_ioafp_mode_page_28()
7545 * ipr_build_mode_select - Build a mode select command
7559 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_select()
7561 ioarcb->res_handle = res_handle; in ipr_build_mode_select()
7562 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_select()
7563 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; in ipr_build_mode_select()
7564 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; in ipr_build_mode_select()
7565 ioarcb->cmd_pkt.cdb[1] = parm; in ipr_build_mode_select()
7566 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_select()
7572 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page28()
7584 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page28()
7591 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page28()
7592 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page28()
7595 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page28()
7598 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_ioafp_mode_select_page28()
7599 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_ioafp_mode_select_page28()
7608 * ipr_build_mode_sense - Builds a mode sense command
7622 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_mode_sense()
7624 ioarcb->res_handle = res_handle; in ipr_build_mode_sense()
7625 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; in ipr_build_mode_sense()
7626 ioarcb->cmd_pkt.cdb[2] = parm; in ipr_build_mode_sense()
7627 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_build_mode_sense()
7628 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_build_mode_sense()
7634 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cmd_failed()
7645 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_cmd_failed()
7647 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_cmd_failed()
7649 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); in ipr_reset_cmd_failed()
7652 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cmd_failed()
7657 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7668 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_mode_sense_failed()
7669 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_failed()
7672 ipr_cmd->job_step = ipr_set_supported_devs; in ipr_reset_mode_sense_failed()
7673 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, in ipr_reset_mode_sense_failed()
7682 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7693 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page28()
7697 0x28, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page28()
7701 ipr_cmd->job_step = ipr_ioafp_mode_select_page28; in ipr_ioafp_mode_sense_page28()
7702 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed; in ipr_ioafp_mode_sense_page28()
7711 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_select_page24()
7722 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; in ipr_ioafp_mode_select_page24()
7731 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF; in ipr_ioafp_mode_select_page24()
7733 length = mode_pages->hdr.length + 1; in ipr_ioafp_mode_select_page24()
7734 mode_pages->hdr.length = 0; in ipr_ioafp_mode_select_page24()
7737 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), in ipr_ioafp_mode_select_page24()
7740 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_ioafp_mode_select_page24()
7748 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7759 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_mode_sense_page24_failed()
7762 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_reset_mode_sense_page24_failed()
7770 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7774 * This function sends a Mode Sense to the IOA to retrieve the IOA Advanced Function Control mode page.
7781 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_mode_sense_page24()
7785 0x24, ioa_cfg->vpd_cbs_dma + in ipr_ioafp_mode_sense_page24()
7789 ipr_cmd->job_step = ipr_ioafp_mode_select_page24; in ipr_ioafp_mode_sense_page24()
7790 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed; in ipr_ioafp_mode_sense_page24()
7799 * ipr_init_res_table - Initialize the resource table
7804 * This function compares the existing resource table with the config table returned by the adapter, handling old/new devices and scheduling mid-layer adds/removes as appropriate.
7812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_init_res_table()
7819 if (ioa_cfg->sis64) in ipr_init_res_table()
7820 flag = ioa_cfg->u.cfg_table64->hdr64.flags; in ipr_init_res_table()
7822 flag = ioa_cfg->u.cfg_table->hdr.flags; in ipr_init_res_table()
7825 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); in ipr_init_res_table()
7827 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) in ipr_init_res_table()
7828 list_move_tail(&res->queue, &old_res); in ipr_init_res_table()
7830 if (ioa_cfg->sis64) in ipr_init_res_table()
7831 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); in ipr_init_res_table()
7833 entries = ioa_cfg->u.cfg_table->hdr.num_entries; in ipr_init_res_table()
7836 if (ioa_cfg->sis64) in ipr_init_res_table()
7837 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; in ipr_init_res_table()
7839 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; in ipr_init_res_table()
7844 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7851 if (list_empty(&ioa_cfg->free_res_q)) { in ipr_init_res_table()
7852 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); in ipr_init_res_table()
7857 res = list_entry(ioa_cfg->free_res_q.next, in ipr_init_res_table()
7859 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7861 res->add_to_ml = 1; in ipr_init_res_table()
7862 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))) in ipr_init_res_table()
7863 res->sdev->allow_restart = 1; in ipr_init_res_table()
7870 if (res->sdev) { in ipr_init_res_table()
7871 res->del_from_ml = 1; in ipr_init_res_table()
7872 res->res_handle = IPR_INVALID_RES_HANDLE; in ipr_init_res_table()
7873 list_move_tail(&res->queue, &ioa_cfg->used_res_q); in ipr_init_res_table()
7879 list_move_tail(&res->queue, &ioa_cfg->free_res_q); in ipr_init_res_table()
7882 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_init_res_table()
7883 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; in ipr_init_res_table()
7885 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; in ipr_init_res_table()
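/*
 * Illustrative sketch only: the reconciliation pattern used by
 * ipr_init_res_table() above. Every known resource is first set aside
 * as "old"; entries the adapter still reports are matched and kept,
 * new ones are flagged for mid-layer add, and whatever remains
 * unmatched is flagged for removal. Matching by a single handle and
 * the two-pass scan are simplified stand-ins for the real list moves.
 */
#include <stdbool.h>
#include <stddef.h>

struct sketch_res {
	unsigned int handle;
	bool del_from_ml;
};

static void sketch_reconcile(struct sketch_res *known, size_t nknown,
			     const unsigned int *reported, size_t nreported)
{
	/* Pass 1: anything the adapter no longer reports gets removed. */
	for (size_t j = 0; j < nknown; j++) {
		bool still_there = false;
		for (size_t i = 0; i < nreported; i++)
			if (known[j].handle == reported[i])
				still_there = true;
		known[j].del_from_ml = !still_there;
	}
	/* Pass 2: anything reported but unknown is a new device. */
	for (size_t i = 0; i < nreported; i++) {
		bool have_it = false;
		for (size_t j = 0; j < nknown; j++)
			if (known[j].handle == reported[i])
				have_it = true;
		if (!have_it) {
			/* would allocate an entry and set add_to_ml = 1 */
		}
	}
}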
7892 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7903 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_query_ioa_cfg()
7904 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_query_ioa_cfg()
7905 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; in ipr_ioafp_query_ioa_cfg()
7906 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_query_ioa_cfg()
7909 if (cap->cap & IPR_CAP_DUAL_IOA_RAID) in ipr_ioafp_query_ioa_cfg()
7910 ioa_cfg->dual_raid = 1; in ipr_ioafp_query_ioa_cfg()
7911 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", in ipr_ioafp_query_ioa_cfg()
7912 ucode_vpd->major_release, ucode_vpd->card_type, in ipr_ioafp_query_ioa_cfg()
7913 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); in ipr_ioafp_query_ioa_cfg()
7914 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_query_ioa_cfg()
7915 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_query_ioa_cfg()
7917 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; in ipr_ioafp_query_ioa_cfg()
7918 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; in ipr_ioafp_query_ioa_cfg()
7919 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; in ipr_ioafp_query_ioa_cfg()
7920 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; in ipr_ioafp_query_ioa_cfg()
7922 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, in ipr_ioafp_query_ioa_cfg()
7925 ipr_cmd->job_step = ipr_init_res_table; in ipr_ioafp_query_ioa_cfg()
7935 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_ioa_service_action_failed()
7946 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_build_ioa_service_action()
7948 ioarcb->res_handle = res_handle; in ipr_build_ioa_service_action()
7949 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION; in ipr_build_ioa_service_action()
7950 ioarcb->cmd_pkt.cdb[1] = sa_code; in ipr_build_ioa_service_action()
7951 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_build_ioa_service_action()
7955 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7964 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_set_caching_parameters()
7965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_set_caching_parameters()
7966 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_set_caching_parameters()
7970 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; in ipr_ioafp_set_caching_parameters()
7972 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) { in ipr_ioafp_set_caching_parameters()
7977 ioarcb->cmd_pkt.cdb[2] = 0x40; in ipr_ioafp_set_caching_parameters()
7979 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed; in ipr_ioafp_set_caching_parameters()
7992 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
8007 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_inquiry()
8010 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_ioafp_inquiry()
8011 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_inquiry()
8013 ioarcb->cmd_pkt.cdb[0] = INQUIRY; in ipr_ioafp_inquiry()
8014 ioarcb->cmd_pkt.cdb[1] = flags; in ipr_ioafp_inquiry()
8015 ioarcb->cmd_pkt.cdb[2] = page; in ipr_ioafp_inquiry()
8016 ioarcb->cmd_pkt.cdb[4] = xfer_len; in ipr_ioafp_inquiry()
8025 * ipr_inquiry_page_supported - Is the given inquiry page supported
8038 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++) in ipr_inquiry_page_supported()
8039 if (page0->page[i] == page) in ipr_inquiry_page_supported()
8046 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8057 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_pageC4_inquiry()
8058 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_pageC4_inquiry()
8059 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data; in ipr_ioafp_pageC4_inquiry()
8062 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters; in ipr_ioafp_pageC4_inquiry()
8067 (ioa_cfg->vpd_cbs_dma in ipr_ioafp_pageC4_inquiry()
8079 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8090 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_cap_inquiry()
8091 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; in ipr_ioafp_cap_inquiry()
8092 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; in ipr_ioafp_cap_inquiry()
8095 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry; in ipr_ioafp_cap_inquiry()
8100 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap), in ipr_ioafp_cap_inquiry()
8110 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8121 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page3_inquiry()
8125 ipr_cmd->job_step = ipr_ioafp_cap_inquiry; in ipr_ioafp_page3_inquiry()
8128 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), in ipr_ioafp_page3_inquiry()
8136 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8147 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_page0_inquiry()
8153 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); in ipr_ioafp_page0_inquiry()
8155 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); in ipr_ioafp_page0_inquiry()
8158 dev_err(&ioa_cfg->pdev->dev, in ipr_ioafp_page0_inquiry()
8162 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES; in ipr_ioafp_page0_inquiry()
8164 list_add_tail(&ipr_cmd->queue, in ipr_ioafp_page0_inquiry()
8165 &ioa_cfg->hrrq->hrrq_free_q); in ipr_ioafp_page0_inquiry()
8170 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; in ipr_ioafp_page0_inquiry()
8173 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data), in ipr_ioafp_page0_inquiry()
8181 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8191 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_std_inquiry()
8194 ipr_cmd->job_step = ipr_ioafp_page0_inquiry; in ipr_ioafp_std_inquiry()
8197 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), in ipr_ioafp_std_inquiry()
8205 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
8216 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_ioafp_identify_hrrq()
8217 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; in ipr_ioafp_identify_hrrq()
8221 ipr_cmd->job_step = ipr_ioafp_std_inquiry; in ipr_ioafp_identify_hrrq()
8222 if (ioa_cfg->identify_hrrq_index == 0) in ipr_ioafp_identify_hrrq()
8223 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); in ipr_ioafp_identify_hrrq()
8225 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { in ipr_ioafp_identify_hrrq()
8226 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; in ipr_ioafp_identify_hrrq()
8228 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; in ipr_ioafp_identify_hrrq()
8229 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_ioafp_identify_hrrq()
8231 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_ioafp_identify_hrrq()
8232 if (ioa_cfg->sis64) in ipr_ioafp_identify_hrrq()
8233 ioarcb->cmd_pkt.cdb[1] = 0x1; in ipr_ioafp_identify_hrrq()
8235 if (ioa_cfg->nvectors == 1) in ipr_ioafp_identify_hrrq()
8236 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8238 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE; in ipr_ioafp_identify_hrrq()
8240 ioarcb->cmd_pkt.cdb[2] = in ipr_ioafp_identify_hrrq()
8241 ((u64) hrrq->host_rrq_dma >> 24) & 0xff; in ipr_ioafp_identify_hrrq()
8242 ioarcb->cmd_pkt.cdb[3] = in ipr_ioafp_identify_hrrq()
8243 ((u64) hrrq->host_rrq_dma >> 16) & 0xff; in ipr_ioafp_identify_hrrq()
8244 ioarcb->cmd_pkt.cdb[4] = in ipr_ioafp_identify_hrrq()
8245 ((u64) hrrq->host_rrq_dma >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8246 ioarcb->cmd_pkt.cdb[5] = in ipr_ioafp_identify_hrrq()
8247 ((u64) hrrq->host_rrq_dma) & 0xff; in ipr_ioafp_identify_hrrq()
8248 ioarcb->cmd_pkt.cdb[7] = in ipr_ioafp_identify_hrrq()
8249 ((sizeof(u32) * hrrq->size) >> 8) & 0xff; in ipr_ioafp_identify_hrrq()
8250 ioarcb->cmd_pkt.cdb[8] = in ipr_ioafp_identify_hrrq()
8251 (sizeof(u32) * hrrq->size) & 0xff; in ipr_ioafp_identify_hrrq()
8253 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8254 ioarcb->cmd_pkt.cdb[9] = in ipr_ioafp_identify_hrrq()
8255 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8257 if (ioa_cfg->sis64) { in ipr_ioafp_identify_hrrq()
8258 ioarcb->cmd_pkt.cdb[10] = in ipr_ioafp_identify_hrrq()
8259 ((u64) hrrq->host_rrq_dma >> 56) & 0xff; in ipr_ioafp_identify_hrrq()
8260 ioarcb->cmd_pkt.cdb[11] = in ipr_ioafp_identify_hrrq()
8261 ((u64) hrrq->host_rrq_dma >> 48) & 0xff; in ipr_ioafp_identify_hrrq()
8262 ioarcb->cmd_pkt.cdb[12] = in ipr_ioafp_identify_hrrq()
8263 ((u64) hrrq->host_rrq_dma >> 40) & 0xff; in ipr_ioafp_identify_hrrq()
8264 ioarcb->cmd_pkt.cdb[13] = in ipr_ioafp_identify_hrrq()
8265 ((u64) hrrq->host_rrq_dma >> 32) & 0xff; in ipr_ioafp_identify_hrrq()
8268 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE) in ipr_ioafp_identify_hrrq()
8269 ioarcb->cmd_pkt.cdb[14] = in ipr_ioafp_identify_hrrq()
8270 ioa_cfg->identify_hrrq_index; in ipr_ioafp_identify_hrrq()
8275 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) in ipr_ioafp_identify_hrrq()
8276 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_ioafp_identify_hrrq()
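/*
 * Illustrative sketch only: how ipr_ioafp_identify_hrrq() above
 * scatters a 64-bit DMA address across single CDB bytes in big-endian
 * order - bits 31:0 into cdb[2..5] and, on SIS-64 adapters, bits
 * 63:32 into cdb[10..13]. The CDB indices mirror the driver; the
 * helper itself is a stand-in.
 */
#include <stdint.h>

static void sketch_pack_hrrq_addr(uint8_t *cdb, uint64_t dma, int sis64)
{
	cdb[2] = (dma >> 24) & 0xff;  /* low word, most significant first */
	cdb[3] = (dma >> 16) & 0xff;
	cdb[4] = (dma >> 8) & 0xff;
	cdb[5] = dma & 0xff;

	if (sis64) {                  /* high word for 64-bit adapters */
		cdb[10] = (dma >> 56) & 0xff;
		cdb[11] = (dma >> 48) & 0xff;
		cdb[12] = (dma >> 40) & 0xff;
		cdb[13] = (dma >> 32) & 0xff;
	}
}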
8287 * ipr_reset_timer_done - Adapter reset timer function
8302 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_timer_done()
8305 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8307 if (ioa_cfg->reset_cmd == ipr_cmd) { in ipr_reset_timer_done()
8308 list_del(&ipr_cmd->queue); in ipr_reset_timer_done()
8309 ipr_cmd->done(ipr_cmd); in ipr_reset_timer_done()
8312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_timer_done()
8316 * ipr_reset_start_timer - Start a timer for adapter reset job
8334 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_start_timer()
8335 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_start_timer()
8337 ipr_cmd->timer.expires = jiffies + timeout; in ipr_reset_start_timer()
8338 ipr_cmd->timer.function = ipr_reset_timer_done; in ipr_reset_start_timer()
8339 add_timer(&ipr_cmd->timer); in ipr_reset_start_timer()
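/*
 * Illustrative sketch only: the job_step engine that drives the whole
 * reset sequence in this file. Each stage does a bit of work, points
 * job_step at the next stage, and returns either CONTINUE (run the
 * next stage immediately) or RETURN (park until an interrupt or timer
 * restarts the job, as ipr_reset_start_timer() arranges above). All
 * names here are simplified stand-ins.
 */
#include <stdio.h>

enum { SKETCH_JOB_CONTINUE, SKETCH_JOB_RETURN };

struct sketch_job {
	int (*job_step)(struct sketch_job *job);
};

static int sketch_step_b(struct sketch_job *job)
{
	(void)job;
	puts("step B: kick hardware, wait for completion");
	return SKETCH_JOB_RETURN;      /* resumed later by irq/timer */
}

static int sketch_step_a(struct sketch_job *job)
{
	puts("step A: prepare");
	job->job_step = sketch_step_b; /* chain to the next stage */
	return SKETCH_JOB_CONTINUE;
}

/* The rough equivalent of ipr_reset_ioa_job(): run until a stage blocks. */
static void sketch_run_job(struct sketch_job *job)
{
	while (job->job_step(job) == SKETCH_JOB_CONTINUE)
		;
}

int main(void)
{
	struct sketch_job job = { .job_step = sketch_step_a };
	sketch_run_job(&job);
	return 0;
}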
8343 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8354 spin_lock(&hrrq->_lock); in ipr_init_ioa_mem()
8355 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size); in ipr_init_ioa_mem()
8358 hrrq->hrrq_start = hrrq->host_rrq; in ipr_init_ioa_mem()
8359 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1]; in ipr_init_ioa_mem()
8360 hrrq->hrrq_curr = hrrq->hrrq_start; in ipr_init_ioa_mem()
8361 hrrq->toggle_bit = 1; in ipr_init_ioa_mem()
8362 spin_unlock(&hrrq->_lock); in ipr_init_ioa_mem()
8366 ioa_cfg->identify_hrrq_index = 0; in ipr_init_ioa_mem()
8367 if (ioa_cfg->hrrq_num == 1) in ipr_init_ioa_mem()
8368 atomic_set(&ioa_cfg->hrrq_index, 0); in ipr_init_ioa_mem()
8370 atomic_set(&ioa_cfg->hrrq_index, 1); in ipr_init_ioa_mem()
8373 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); in ipr_init_ioa_mem()
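/*
 * Illustrative sketch only: the toggle-bit convention of the host RRQ
 * initialized above. The queue is a circular array of 32-bit response
 * words; the producer writes each entry carrying the current toggle
 * bit and flips the bit each time it wraps, so the consumer can tell
 * a fresh entry from a stale one without head/tail registers. The bit
 * position and all sketch_* names are stand-ins.
 */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_TOGGLE_BIT 0x1u  /* stand-in for the HRRQ toggle bit */

struct sketch_hrrq {
	uint32_t *ring;
	size_t size, curr;
	uint32_t toggle;        /* value expected in new entries */
};

static void sketch_hrrq_init(struct sketch_hrrq *q, uint32_t *ring, size_t n)
{
	for (size_t i = 0; i < n; i++)
		ring[i] = 0;             /* zeroed ring: toggle bit clear */
	q->ring = ring;
	q->size = n;
	q->curr = 0;
	q->toggle = SKETCH_TOGGLE_BIT;   /* first valid entries have it set */
}

/* Returns 1 and stores the response if a new entry is ready, else 0. */
static int sketch_hrrq_poll(struct sketch_hrrq *q, uint32_t *resp)
{
	uint32_t entry = q->ring[q->curr];

	if ((entry & SKETCH_TOGGLE_BIT) != q->toggle)
		return 0;                /* producer has not written it yet */

	*resp = entry;
	if (++q->curr == q->size) {      /* wrap: expect the flipped bit */
		q->curr = 0;
		q->toggle ^= SKETCH_TOGGLE_BIT;
	}
	return 1;
}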
8377 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8388 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_next_stage()
8391 feedback = readl(ioa_cfg->regs.init_feedback_reg); in ipr_reset_next_stage()
8406 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8407 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8408 stage_time = ioa_cfg->transop_timeout; in ipr_reset_next_stage()
8409 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8411 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_next_stage()
8413 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_next_stage()
8416 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); in ipr_reset_next_stage()
8417 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_next_stage()
8422 ipr_cmd->timer.expires = jiffies + stage_time * HZ; in ipr_reset_next_stage()
8423 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_next_stage()
8424 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_next_stage()
8425 add_timer(&ipr_cmd->timer); in ipr_reset_next_stage()
8427 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_next_stage()
8433 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8444 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_enable_ioa()
8450 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; in ipr_reset_enable_ioa()
8453 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_enable_ioa()
8454 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8455 ioa_cfg->hrrq[i].allow_interrupts = 1; in ipr_reset_enable_ioa()
8456 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_enable_ioa()
8458 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8460 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8461 int_reg = readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_enable_ioa()
8464 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_reset_enable_ioa()
8468 ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8469 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8474 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_enable_ioa()
8476 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8479 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); in ipr_reset_enable_ioa()
8481 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_reset_enable_ioa()
8483 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_reset_enable_ioa()
8485 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); in ipr_reset_enable_ioa()
8487 if (ioa_cfg->sis64) { in ipr_reset_enable_ioa()
8488 ipr_cmd->job_step = ipr_reset_next_stage; in ipr_reset_enable_ioa()
8492 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); in ipr_reset_enable_ioa()
8493 ipr_cmd->timer.function = ipr_oper_timeout; in ipr_reset_enable_ioa()
8494 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_enable_ioa()
8495 add_timer(&ipr_cmd->timer); in ipr_reset_enable_ioa()
8496 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_enable_ioa()
8503 * ipr_reset_wait_for_dump - Wait for a dump to time out.
8514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_for_dump()
8516 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_reset_wait_for_dump()
8517 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_reset_wait_for_dump()
8518 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_reset_wait_for_dump()
8519 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_reset_wait_for_dump()
8521 ioa_cfg->dump_timeout = 1; in ipr_reset_wait_for_dump()
8522 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_wait_for_dump()
8528 * ipr_unit_check_no_data - Log a unit check/no data error log
8539 ioa_cfg->errors_logged++; in ipr_unit_check_no_data()
8540 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); in ipr_unit_check_no_data()
8544 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8561 mailbox = readl(ioa_cfg->ioa_mailbox); in ipr_get_unit_check_buffer()
8563 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { in ipr_get_unit_check_buffer()
8583 length = (be32_to_cpu(sdt.entry[0].end_token) - in ipr_get_unit_check_buffer()
8587 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, in ipr_get_unit_check_buffer()
8589 list_del_init(&hostrcb->queue); in ipr_get_unit_check_buffer()
8590 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); in ipr_get_unit_check_buffer()
8594 (__be32 *)&hostrcb->hcam, in ipr_get_unit_check_buffer()
8595 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); in ipr_get_unit_check_buffer()
8599 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); in ipr_get_unit_check_buffer()
8601 ioa_cfg->sdt_state == GET_DUMP) in ipr_get_unit_check_buffer()
8602 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_get_unit_check_buffer()
8606 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); in ipr_get_unit_check_buffer()
8610 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8620 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_get_unit_check_job()
8623 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_get_unit_check_job()
8625 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_get_unit_check_job()
8634 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_dump_mailbox_wait()
8638 if (ioa_cfg->sdt_state != GET_DUMP) in ipr_dump_mailbox_wait()
8641 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left || in ipr_dump_mailbox_wait()
8642 (readl(ioa_cfg->regs.sense_interrupt_reg) & in ipr_dump_mailbox_wait()
8645 if (!ipr_cmd->u.time_left) in ipr_dump_mailbox_wait()
8646 dev_err(&ioa_cfg->pdev->dev, in ipr_dump_mailbox_wait()
8649 ioa_cfg->sdt_state = READ_DUMP; in ipr_dump_mailbox_wait()
8650 ioa_cfg->dump_timeout = 0; in ipr_dump_mailbox_wait()
8651 if (ioa_cfg->sis64) in ipr_dump_mailbox_wait()
8655 ipr_cmd->job_step = ipr_reset_wait_for_dump; in ipr_dump_mailbox_wait()
8656 schedule_work(&ioa_cfg->work_q); in ipr_dump_mailbox_wait()
8659 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_dump_mailbox_wait()
8669 * ipr_reset_restore_cfg_space - Restore PCI config space.
8681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_restore_cfg_space()
8684 ioa_cfg->pdev->state_saved = true; in ipr_reset_restore_cfg_space()
8685 pci_restore_state(ioa_cfg->pdev); in ipr_reset_restore_cfg_space()
8688 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_restore_cfg_space()
8694 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8696 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8697 readl(ioa_cfg->regs.endian_swap_reg); in ipr_reset_restore_cfg_space()
8700 if (ioa_cfg->ioa_unit_checked) { in ipr_reset_restore_cfg_space()
8701 if (ioa_cfg->sis64) { in ipr_reset_restore_cfg_space()
8702 ipr_cmd->job_step = ipr_reset_get_unit_check_job; in ipr_reset_restore_cfg_space()
8706 ioa_cfg->ioa_unit_checked = 0; in ipr_reset_restore_cfg_space()
8708 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_restore_cfg_space()
8714 if (ioa_cfg->in_ioa_bringdown) { in ipr_reset_restore_cfg_space()
8715 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_restore_cfg_space()
8716 } else if (ioa_cfg->sdt_state == GET_DUMP) { in ipr_reset_restore_cfg_space()
8717 ipr_cmd->job_step = ipr_dump_mailbox_wait; in ipr_reset_restore_cfg_space()
8718 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX; in ipr_reset_restore_cfg_space()
8720 ipr_cmd->job_step = ipr_reset_enable_ioa; in ipr_reset_restore_cfg_space()
8728 * ipr_reset_bist_done - BIST has completed on the adapter.
8738 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_bist_done()
8741 if (ioa_cfg->cfg_locked) in ipr_reset_bist_done()
8742 pci_cfg_access_unlock(ioa_cfg->pdev); in ipr_reset_bist_done()
8743 ioa_cfg->cfg_locked = 0; in ipr_reset_bist_done()
8744 ipr_cmd->job_step = ipr_reset_restore_cfg_space; in ipr_reset_bist_done()
8750 * ipr_reset_start_bist - Run BIST on the adapter.
8760 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_start_bist()
8764 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO) in ipr_reset_start_bist()
8766 ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_start_bist()
8768 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); in ipr_reset_start_bist()
8771 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_start_bist()
8775 if (ioa_cfg->cfg_locked) in ipr_reset_start_bist()
8776 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev); in ipr_reset_start_bist()
8777 ioa_cfg->cfg_locked = 0; in ipr_reset_start_bist()
8778 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); in ipr_reset_start_bist()
8787 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8798 ipr_cmd->job_step = ipr_reset_bist_done; in ipr_reset_slot_reset_done()
8805 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8814 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_reset_work()
8815 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_reset_reset_work()
8823 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8824 if (ioa_cfg->reset_cmd == ipr_cmd) in ipr_reset_reset_work()
8826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_reset_reset_work()
8831 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8841 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_slot_reset()
8844 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work); in ipr_reset_slot_reset()
8845 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work); in ipr_reset_slot_reset()
8846 ipr_cmd->job_step = ipr_reset_slot_reset_done; in ipr_reset_slot_reset()
8852 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8862 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_block_config_access_wait()
8865 if (pci_cfg_access_trylock(ioa_cfg->pdev)) { in ipr_reset_block_config_access_wait()
8866 ioa_cfg->cfg_locked = 1; in ipr_reset_block_config_access_wait()
8867 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8869 if (ipr_cmd->u.time_left) { in ipr_reset_block_config_access_wait()
8871 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access_wait()
8875 ipr_cmd->job_step = ioa_cfg->reset; in ipr_reset_block_config_access_wait()
8876 dev_err(&ioa_cfg->pdev->dev, in ipr_reset_block_config_access_wait()
8877 "Timed out waiting to lock config access. Resetting anyway.\n"); in ipr_reset_block_config_access_wait()
8885 * ipr_reset_block_config_access - Block config access to the IOA
8895 ipr_cmd->ioa_cfg->cfg_locked = 0; in ipr_reset_block_config_access()
8896 ipr_cmd->job_step = ipr_reset_block_config_access_wait; in ipr_reset_block_config_access()
8897 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_block_config_access()
8902 * ipr_reset_allowed - Query whether or not IOA can be reset
8906 * 0 if reset not allowed / non-zero if reset is allowed
8912 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_reset_allowed()
8917 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8933 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_wait_to_start_bist()
8936 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { in ipr_reset_wait_to_start_bist()
8937 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; in ipr_reset_wait_to_start_bist()
8940 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_wait_to_start_bist()
8948 * ipr_reset_alert - Alert the adapter of a pending reset
8961 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_alert()
8966 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); in ipr_reset_alert()
8970 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); in ipr_reset_alert()
8971 ipr_cmd->job_step = ipr_reset_wait_to_start_bist; in ipr_reset_alert()
8973 ipr_cmd->job_step = ipr_reset_block_config_access; in ipr_reset_alert()
8976 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; in ipr_reset_alert()
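/*
 * Editor's sketch, not part of ipr.c: why ipr_reset_alert() reads
 * PCI_COMMAND before ringing the MMIO doorbell. If memory space decoding is
 * disabled (for example after a bus error), an MMIO write would be silently
 * dropped, so the MMIO alert path is only taken when PCI_COMMAND_MEMORY is
 * set; otherwise the driver falls straight through to the config-space path.
 */
#include <linux/pci.h>

static bool example_mmio_usable(struct pci_dev *pdev)
{
	u16 cmd_reg;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd_reg))
		return false;
	return cmd_reg & PCI_COMMAND_MEMORY;
}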
8984 * ipr_reset_quiesce_done - Complete IOA disconnect
8994 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_quiesce_done()
8997 ipr_cmd->job_step = ipr_ioa_bringdown_done; in ipr_reset_quiesce_done()
9004 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9015 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam_done()
9022 ipr_cmd->job_step = ipr_reset_quiesce_done; in ipr_reset_cancel_hcam_done()
9025 spin_lock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9026 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam_done()
9029 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_cancel_hcam_done()
9033 spin_unlock(&hrrq->_lock); in ipr_reset_cancel_hcam_done()
9044 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9054 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_cancel_hcam()
9058 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ]; in ipr_reset_cancel_hcam()
9061 ipr_cmd->job_step = ipr_reset_cancel_hcam_done; in ipr_reset_cancel_hcam()
9063 if (!hrrq->ioa_is_dead) { in ipr_reset_cancel_hcam()
9064 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) { in ipr_reset_cancel_hcam()
9065 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) { in ipr_reset_cancel_hcam()
9066 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC) in ipr_reset_cancel_hcam()
9067 continue; in ipr_reset_cancel_hcam()
9069 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_cancel_hcam()
9071 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; in ipr_reset_cancel_hcam()
9072 cmd_pkt->request_type = IPR_RQTYPE_IOACMD; in ipr_reset_cancel_hcam()
9073 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST; in ipr_reset_cancel_hcam()
9074 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB; in ipr_reset_cancel_hcam()
9075 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff; in ipr_reset_cancel_hcam()
9076 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff; in ipr_reset_cancel_hcam()
9077 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff; in ipr_reset_cancel_hcam()
9078 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff; in ipr_reset_cancel_hcam()
9079 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff; in ipr_reset_cancel_hcam()
9080 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff; in ipr_reset_cancel_hcam()
9081 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff; in ipr_reset_cancel_hcam()
9082 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff; in ipr_reset_cancel_hcam()
9088 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_cancel_hcam()
9093 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_cancel_hcam()
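/*
 * Editor's sketch, not part of ipr.c: the Cancel Request CDB built above
 * splits the 64-bit IOARCB DMA address across two non-contiguous byte
 * groups -- bits 63:32 into cdb[10..13] and bits 31:0 into cdb[2..5], both
 * big-endian. The same packing with the kernel's unaligned helpers
 * (<asm/unaligned.h> on older kernels):
 */
#include <linux/unaligned.h>

static void example_pack_cancel_cdb(u8 *cdb, u64 dma_addr)
{
	put_unaligned_be32(upper_32_bits(dma_addr), &cdb[10]);	/* bits 63:32 */
	put_unaligned_be32(lower_32_bits(dma_addr), &cdb[2]);	/* bits 31:0  */
}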
9100 * ipr_reset_ucode_download_done - Microcode download completion
9110 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download_done()
9111 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download_done()
9113 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist, in ipr_reset_ucode_download_done()
9114 sglist->num_sg, DMA_TO_DEVICE); in ipr_reset_ucode_download_done()
9116 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download_done()
9121 * ipr_reset_ucode_download - Download microcode to the adapter
9132 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ucode_download()
9133 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; in ipr_reset_ucode_download()
9136 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_ucode_download()
9141 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_ucode_download()
9142 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; in ipr_reset_ucode_download()
9143 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; in ipr_reset_ucode_download()
9144 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; in ipr_reset_ucode_download()
9145 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; in ipr_reset_ucode_download()
9146 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; in ipr_reset_ucode_download()
9147 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; in ipr_reset_ucode_download()
9149 if (ioa_cfg->sis64) in ipr_reset_ucode_download()
9153 ipr_cmd->job_step = ipr_reset_ucode_download_done; in ipr_reset_ucode_download()
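/*
 * Editor's sketch, not part of ipr.c: WRITE BUFFER carries a 24-bit
 * parameter list length in CDB bytes 6..8, big-endian, which is what the
 * three shift-and-mask lines above implement:
 */
#include <linux/types.h>

static void example_set_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;	/* MSB of the 24-bit length */
	cdb[7] = (len >> 8) & 0xff;
	cdb[8] = len & 0xff;		/* LSB */
}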
9163 * ipr_reset_shutdown_ioa - Shutdown the adapter
9175 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_shutdown_ioa()
9176 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; in ipr_reset_shutdown_ioa()
9182 ipr_cmd->job_step = ipr_reset_cancel_hcam; in ipr_reset_shutdown_ioa()
9183 else if (shutdown_type != IPR_SHUTDOWN_NONE && in ipr_reset_shutdown_ioa()
9184 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { in ipr_reset_shutdown_ioa()
9185 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_reset_shutdown_ioa()
9186 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_reset_shutdown_ioa()
9187 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_reset_shutdown_ioa()
9188 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; in ipr_reset_shutdown_ioa()
9194 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) in ipr_reset_shutdown_ioa()
9202 ipr_cmd->job_step = ipr_reset_ucode_download; in ipr_reset_shutdown_ioa()
9204 ipr_cmd->job_step = ipr_reset_alert; in ipr_reset_shutdown_ioa()
9211 * ipr_reset_ioa_job - Adapter reset job
9222 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_ioa_job()
9225 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); in ipr_reset_ioa_job()
9227 if (ioa_cfg->reset_cmd != ipr_cmd) { in ipr_reset_ioa_job()
9232 list_add_tail(&ipr_cmd->queue, in ipr_reset_ioa_job()
9233 &ipr_cmd->hrrq->hrrq_free_q); in ipr_reset_ioa_job()
9238 rc = ipr_cmd->job_step_failed(ipr_cmd); in ipr_reset_ioa_job()
9244 ipr_cmd->job_step_failed = ipr_reset_cmd_failed; in ipr_reset_ioa_job()
9245 rc = ipr_cmd->job_step(ipr_cmd); in ipr_reset_ioa_job()
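/*
 * Editor's sketch, not part of ipr.c: the reset path is a job-step state
 * machine. Each step returns "continue" to run the next step immediately,
 * or "return" after queueing asynchronous work (a timer, an IOA command)
 * whose completion re-enters the loop. The shape of ipr_reset_ioa_job()
 * with all driver detail stripped:
 */
enum example_rc { EX_JOB_CONTINUE, EX_JOB_RETURN };

struct example_job {
	enum example_rc (*job_step)(struct example_job *job);
};

static void example_run_job(struct example_job *job)
{
	/* each step advances job->job_step before returning */
	while (job->job_step(job) == EX_JOB_CONTINUE)
		;
}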
9250 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9270 ioa_cfg->in_reset_reload = 1; in _ipr_initiate_ioa_reset()
9271 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in _ipr_initiate_ioa_reset()
9272 spin_lock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9273 ioa_cfg->hrrq[i].allow_cmds = 0; in _ipr_initiate_ioa_reset()
9274 spin_unlock(&ioa_cfg->hrrq[i]._lock); in _ipr_initiate_ioa_reset()
9277 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in _ipr_initiate_ioa_reset()
9278 ioa_cfg->scsi_unblock = 0; in _ipr_initiate_ioa_reset()
9279 ioa_cfg->scsi_blocked = 1; in _ipr_initiate_ioa_reset()
9280 scsi_block_requests(ioa_cfg->host); in _ipr_initiate_ioa_reset()
9284 ioa_cfg->reset_cmd = ipr_cmd; in _ipr_initiate_ioa_reset()
9285 ipr_cmd->job_step = job_step; in _ipr_initiate_ioa_reset()
9286 ipr_cmd->u.shutdown_type = shutdown_type; in _ipr_initiate_ioa_reset()
9292 * ipr_initiate_ioa_reset - Initiate an adapter reset
9308 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) in ipr_initiate_ioa_reset()
9311 if (ioa_cfg->in_reset_reload) { in ipr_initiate_ioa_reset()
9312 if (ioa_cfg->sdt_state == GET_DUMP) in ipr_initiate_ioa_reset()
9313 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in ipr_initiate_ioa_reset()
9314 else if (ioa_cfg->sdt_state == READ_DUMP) in ipr_initiate_ioa_reset()
9315 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_reset()
9318 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { in ipr_initiate_ioa_reset()
9319 dev_err(&ioa_cfg->pdev->dev, in ipr_initiate_ioa_reset()
9320 "IOA taken offline - error recovery failed\n"); in ipr_initiate_ioa_reset()
9322 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_reset()
9323 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_initiate_ioa_reset()
9324 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9325 ioa_cfg->hrrq[i].ioa_is_dead = 1; in ipr_initiate_ioa_reset()
9326 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_initiate_ioa_reset()
9330 if (ioa_cfg->in_ioa_bringdown) { in ipr_initiate_ioa_reset()
9331 ioa_cfg->reset_cmd = NULL; in ipr_initiate_ioa_reset()
9332 ioa_cfg->in_reset_reload = 0; in ipr_initiate_ioa_reset()
9334 wake_up_all(&ioa_cfg->reset_wait_q); in ipr_initiate_ioa_reset()
9336 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { in ipr_initiate_ioa_reset()
9337 ioa_cfg->scsi_unblock = 1; in ipr_initiate_ioa_reset()
9338 schedule_work(&ioa_cfg->work_q); in ipr_initiate_ioa_reset()
9342 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_reset()
9352 * ipr_reset_freeze - Hold off all I/O activity
9361 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; in ipr_reset_freeze()
9365 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_reset_freeze()
9366 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9367 ioa_cfg->hrrq[i].allow_interrupts = 0; in ipr_reset_freeze()
9368 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_reset_freeze()
9371 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); in ipr_reset_freeze()
9372 ipr_cmd->done = ipr_reset_ioa_job; in ipr_reset_freeze()
9377 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9388 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9389 if (!ioa_cfg->probe_done) in ipr_pci_mmio_enabled()
9391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_mmio_enabled()
9396 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9408 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9409 if (ioa_cfg->probe_done) in ipr_pci_frozen()
9411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_frozen()
9415 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9427 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9428 if (ioa_cfg->probe_done) { in ipr_pci_slot_reset()
9429 if (ioa_cfg->needs_warm_reset) in ipr_pci_slot_reset()
9435 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_slot_reset()
9436 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_slot_reset()
9441 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9453 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
9454 if (ioa_cfg->probe_done) { in ipr_pci_perm_failure()
9455 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_pci_perm_failure()
9456 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_pci_perm_failure()
9457 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; in ipr_pci_perm_failure()
9458 ioa_cfg->in_ioa_bringdown = 1; in ipr_pci_perm_failure()
9459 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_pci_perm_failure()
9460 spin_lock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9461 ioa_cfg->hrrq[i].allow_cmds = 0; in ipr_pci_perm_failure()
9462 spin_unlock(&ioa_cfg->hrrq[i]._lock); in ipr_pci_perm_failure()
9467 wake_up_all(&ioa_cfg->eeh_wait_q); in ipr_pci_perm_failure()
9468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_pci_perm_failure()
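/*
 * Editor's sketch, not shown in this excerpt: callbacks like the ones above
 * reach the PCI core through a struct pci_error_handlers hung off the
 * driver's struct pci_driver. The field names are the real PCI core API;
 * the wiring below assumes ipr_pci_error_detected dispatches to the
 * frozen/perm_failure helpers shown above.
 */
#include <linux/pci.h>

static const struct pci_error_handlers example_err_handler = {
	.error_detected	= ipr_pci_error_detected,
	.mmio_enabled	= ipr_pci_mmio_enabled,
	.slot_reset	= ipr_pci_slot_reset,
};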
9472 * ipr_pci_error_detected - Called when a PCI error is detected.
9499 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9506 * 0 on success / -EIO on failure
9514 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9515 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg); in ipr_probe_ioa_part2()
9516 ioa_cfg->probe_done = 1; in ipr_probe_ioa_part2()
9517 if (ioa_cfg->needs_hard_reset) { in ipr_probe_ioa_part2()
9518 ioa_cfg->needs_hard_reset = 0; in ipr_probe_ioa_part2()
9523 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in ipr_probe_ioa_part2()
9530 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9540 if (ioa_cfg->ipr_cmnd_list) { in ipr_free_cmd_blks()
9542 if (ioa_cfg->ipr_cmnd_list[i]) in ipr_free_cmd_blks()
9543 dma_pool_free(ioa_cfg->ipr_cmd_pool, in ipr_free_cmd_blks()
9544 ioa_cfg->ipr_cmnd_list[i], in ipr_free_cmd_blks()
9545 ioa_cfg->ipr_cmnd_list_dma[i]); in ipr_free_cmd_blks()
9547 ioa_cfg->ipr_cmnd_list[i] = NULL; in ipr_free_cmd_blks()
9551 dma_pool_destroy(ioa_cfg->ipr_cmd_pool); in ipr_free_cmd_blks()
9553 kfree(ioa_cfg->ipr_cmnd_list); in ipr_free_cmd_blks()
9554 kfree(ioa_cfg->ipr_cmnd_list_dma); in ipr_free_cmd_blks()
9555 ioa_cfg->ipr_cmnd_list = NULL; in ipr_free_cmd_blks()
9556 ioa_cfg->ipr_cmnd_list_dma = NULL; in ipr_free_cmd_blks()
9557 ioa_cfg->ipr_cmd_pool = NULL; in ipr_free_cmd_blks()
9561 * ipr_free_mem - Frees memory allocated for an adapter
9571 kfree(ioa_cfg->res_entries); in ipr_free_mem()
9572 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_free_mem()
9573 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_free_mem()
9576 for (i = 0; i < ioa_cfg->hrrq_num; i++) in ipr_free_mem()
9577 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9578 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_free_mem()
9579 ioa_cfg->hrrq[i].host_rrq, in ipr_free_mem()
9580 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_free_mem()
9582 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, in ipr_free_mem()
9583 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_free_mem()
9586 dma_free_coherent(&ioa_cfg->pdev->dev, in ipr_free_mem()
9587 sizeof(struct ipr_hostrcb), in ipr_free_mem()
9588 ioa_cfg->hostrcb[i], in ipr_free_mem()
9589 ioa_cfg->hostrcb_dma[i]); in ipr_free_mem()
9593 kfree(ioa_cfg->trace); in ipr_free_mem()
9597 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9608 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_irqs()
9611 for (i = 0; i < ioa_cfg->nvectors; i++) in ipr_free_irqs()
9612 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); in ipr_free_irqs()
9617 * ipr_free_all_resources - Free all allocated resources for an adapter.
9628 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_free_all_resources()
9632 if (ioa_cfg->reset_work_q) in ipr_free_all_resources()
9633 destroy_workqueue(ioa_cfg->reset_work_q); in ipr_free_all_resources()
9634 iounmap(ioa_cfg->hdw_dma_regs); in ipr_free_all_resources()
9637 scsi_host_put(ioa_cfg->host); in ipr_free_all_resources()
9643 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9647 * 0 on success / -ENOMEM on allocation failure
9656 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev, in ipr_alloc_cmd_blks()
9657 sizeof(struct ipr_cmnd), 512, 0); in ipr_alloc_cmd_blks()
9659 if (!ioa_cfg->ipr_cmd_pool) in ipr_alloc_cmd_blks()
9660 return -ENOMEM; in ipr_alloc_cmd_blks()
9662 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL); in ipr_alloc_cmd_blks()
9663 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL); in ipr_alloc_cmd_blks()
9665 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) { in ipr_alloc_cmd_blks()
9667 return -ENOMEM; in ipr_alloc_cmd_blks()
9670 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_cmd_blks()
9671 if (ioa_cfg->hrrq_num > 1) { in ipr_alloc_cmd_blks()
9674 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9675 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9676 (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9680 (ioa_cfg->hrrq_num - 1); in ipr_alloc_cmd_blks()
9681 ioa_cfg->hrrq[i].min_cmd_id = in ipr_alloc_cmd_blks()
9683 (i - 1) * entries_each_hrrq; in ipr_alloc_cmd_blks()
9684 ioa_cfg->hrrq[i].max_cmd_id = in ipr_alloc_cmd_blks()
9686 i * entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9690 ioa_cfg->hrrq[i].min_cmd_id = 0; in ipr_alloc_cmd_blks()
9691 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1); in ipr_alloc_cmd_blks()
9693 ioa_cfg->hrrq[i].size = entries_each_hrrq; in ipr_alloc_cmd_blks()
9696 BUG_ON(ioa_cfg->hrrq_num == 0); in ipr_alloc_cmd_blks()
9698 i = IPR_NUM_CMD_BLKS - in ipr_alloc_cmd_blks()
9699 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1; in ipr_alloc_cmd_blks()
9701 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i; in ipr_alloc_cmd_blks()
9702 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i; in ipr_alloc_cmd_blks()
9706 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool, in ipr_alloc_cmd_blks()
9707 GFP_KERNEL, &dma_addr); in ipr_alloc_cmd_blks()
9709 if (!ipr_cmd) { in ipr_alloc_cmd_blks()
9710 ipr_free_cmd_blks(ioa_cfg); in ipr_alloc_cmd_blks()
9711 return -ENOMEM; in ipr_alloc_cmd_blks()
9714 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; in ipr_alloc_cmd_blks()
9715 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; in ipr_alloc_cmd_blks()
9717 ioarcb = &ipr_cmd->ioarcb; in ipr_alloc_cmd_blks()
9718 ipr_cmd->dma_addr = dma_addr; in ipr_alloc_cmd_blks()
9719 if (ioa_cfg->sis64) in ipr_alloc_cmd_blks()
9720 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); in ipr_alloc_cmd_blks()
9722 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); in ipr_alloc_cmd_blks()
9724 ioarcb->host_response_handle = cpu_to_be32(i << 2); in ipr_alloc_cmd_blks()
9725 if (ioa_cfg->sis64) { in ipr_alloc_cmd_blks()
9726 ioarcb->u.sis64_addr_data.data_ioadl_addr = in ipr_alloc_cmd_blks()
9728 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9731 ioarcb->write_ioadl_addr = in ipr_alloc_cmd_blks()
9733 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; in ipr_alloc_cmd_blks()
9734 ioarcb->ioasa_host_pci_addr = in ipr_alloc_cmd_blks()
9737 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); in ipr_alloc_cmd_blks()
9738 ipr_cmd->cmd_index = i; in ipr_alloc_cmd_blks()
9739 ipr_cmd->ioa_cfg = ioa_cfg; in ipr_alloc_cmd_blks()
9740 ipr_cmd->sense_buffer_dma = dma_addr + in ipr_alloc_cmd_blks()
9743 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id; in ipr_alloc_cmd_blks()
9744 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id]; in ipr_alloc_cmd_blks()
9745 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_alloc_cmd_blks()
9746 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id) in ipr_alloc_cmd_blks()
9747 hrrq_id++; in ipr_alloc_cmd_blks()
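/*
 * Editor's sketch, not part of ipr.c: the ID carving at the top of
 * ipr_alloc_cmd_blks(), reduced to plain arithmetic (assumes hrrq_num >= 2;
 * parameter names are illustrative). HRRQ 0 keeps the internal command
 * blocks, HRRQs 1..n-1 split the base pool evenly, and the division
 * remainder is folded into the last queue, as in the lines just above.
 */
static void example_partition_cmd_ids(unsigned int hrrq_num,
				      unsigned int internal_blks,
				      unsigned int total_blks,
				      unsigned int *min_id,
				      unsigned int *max_id)
{
	unsigned int base = total_blks - internal_blks;
	unsigned int per_q = base / (hrrq_num - 1);
	unsigned int i;

	min_id[0] = 0;
	max_id[0] = internal_blks - 1;
	for (i = 1; i < hrrq_num; i++) {
		min_id[i] = internal_blks + (i - 1) * per_q;
		max_id[i] = internal_blks + i * per_q - 1;
	}
	/* leftover blocks (base % (hrrq_num - 1)) go to the last queue */
	max_id[hrrq_num - 1] += base % (hrrq_num - 1);
}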
9754 * ipr_alloc_mem - Allocate memory for an adapter
9758 * 0 on success / non-zero for error
9762 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_alloc_mem()
9763 int i, rc = -ENOMEM; in ipr_alloc_mem()
9766 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported, in ipr_alloc_mem()
9770 if (!ioa_cfg->res_entries) in ipr_alloc_mem()
9773 for (i = 0; i < ioa_cfg->max_devs_supported; i++) { in ipr_alloc_mem()
9774 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); in ipr_alloc_mem()
9775 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9778 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9780 &ioa_cfg->vpd_cbs_dma, in ipr_alloc_mem()
9783 if (!ioa_cfg->vpd_cbs) in ipr_alloc_mem()
9789 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9790 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9791 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9792 &ioa_cfg->hrrq[i].host_rrq_dma, in ipr_alloc_mem()
9795 if (!ioa_cfg->hrrq[i].host_rrq) { in ipr_alloc_mem()
9796 while (--i >= 0) in ipr_alloc_mem()
9797 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9798 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9799 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9800 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9803 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9806 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9807 ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9808 &ioa_cfg->cfg_table_dma, in ipr_alloc_mem()
9811 if (!ioa_cfg->u.cfg_table) in ipr_alloc_mem()
9815 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, in ipr_alloc_mem()
9817 &ioa_cfg->hostrcb_dma[i], in ipr_alloc_mem()
9820 if (!ioa_cfg->hostrcb[i]) in ipr_alloc_mem()
9823 ioa_cfg->hostrcb[i]->hostrcb_dma = in ipr_alloc_mem()
9824 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); in ipr_alloc_mem()
9825 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg; in ipr_alloc_mem()
9826 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); in ipr_alloc_mem()
9829 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES, in ipr_alloc_mem()
9833 if (!ioa_cfg->trace) in ipr_alloc_mem()
9842 while (i-- > 0) { in ipr_alloc_mem()
9843 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb), in ipr_alloc_mem()
9844 ioa_cfg->hostrcb[i], in ipr_alloc_mem()
9845 ioa_cfg->hostrcb_dma[i]); in ipr_alloc_mem()
9847 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size, in ipr_alloc_mem()
9848 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); in ipr_alloc_mem()
9850 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in ipr_alloc_mem()
9851 dma_free_coherent(&pdev->dev, in ipr_alloc_mem()
9852 sizeof(u32) * ioa_cfg->hrrq[i].size, in ipr_alloc_mem()
9853 ioa_cfg->hrrq[i].host_rrq, in ipr_alloc_mem()
9854 ioa_cfg->hrrq[i].host_rrq_dma); in ipr_alloc_mem()
9859 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs), in ipr_alloc_mem()
9860 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); in ipr_alloc_mem()
9862 kfree(ioa_cfg->res_entries); in ipr_alloc_mem()
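/*
 * Editor's sketch, not part of ipr.c: the out_* labels above form the usual
 * goto ladder. Each allocation step gets a cleanup label that frees only
 * what was already set up, so a failure at step N unwinds N-1..1 in reverse
 * order through a single exit path. Names and sizes below are illustrative.
 */
#include <linux/slab.h>

struct example_dev { void *a; void *b; };

static int example_alloc(struct example_dev *d)
{
	d->a = kzalloc(64, GFP_KERNEL);
	if (!d->a)
		goto out;
	d->b = kzalloc(128, GFP_KERNEL);
	if (!d->b)
		goto out_free_a;
	return 0;

out_free_a:
	kfree(d->a);
out:
	return -ENOMEM;
}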
9867 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9878 ioa_cfg->bus_attr[i].bus = i; in ipr_initialize_bus_attr()
9879 ioa_cfg->bus_attr[i].qas_enabled = 0; in ipr_initialize_bus_attr()
9880 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; in ipr_initialize_bus_attr()
9882 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; in ipr_initialize_bus_attr()
9884 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; in ipr_initialize_bus_attr()
9889 * ipr_init_regs - Initialize IOA registers
9901 p = &ioa_cfg->chip_cfg->regs; in ipr_init_regs()
9902 t = &ioa_cfg->regs; in ipr_init_regs()
9903 base = ioa_cfg->hdw_dma_regs; in ipr_init_regs()
9905 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; in ipr_init_regs()
9906 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; in ipr_init_regs()
9907 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; in ipr_init_regs()
9908 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; in ipr_init_regs()
9909 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; in ipr_init_regs()
9910 t->clr_interrupt_reg = base + p->clr_interrupt_reg; in ipr_init_regs()
9911 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; in ipr_init_regs()
9912 t->sense_interrupt_reg = base + p->sense_interrupt_reg; in ipr_init_regs()
9913 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; in ipr_init_regs()
9914 t->ioarrin_reg = base + p->ioarrin_reg; in ipr_init_regs()
9915 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; in ipr_init_regs()
9916 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; in ipr_init_regs()
9917 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; in ipr_init_regs()
9918 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; in ipr_init_regs()
9919 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; in ipr_init_regs()
9920 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; in ipr_init_regs()
9922 if (ioa_cfg->sis64) { in ipr_init_regs()
9923 t->init_feedback_reg = base + p->init_feedback_reg; in ipr_init_regs()
9924 t->dump_addr_reg = base + p->dump_addr_reg; in ipr_init_regs()
9925 t->dump_data_reg = base + p->dump_data_reg; in ipr_init_regs()
9926 t->endian_swap_reg = base + p->endian_swap_reg; in ipr_init_regs()
9931 * ipr_init_ioa_cfg - Initialize IOA config struct
9944 ioa_cfg->host = host; in ipr_init_ioa_cfg()
9945 ioa_cfg->pdev = pdev; in ipr_init_ioa_cfg()
9946 ioa_cfg->log_level = ipr_log_level; in ipr_init_ioa_cfg()
9947 ioa_cfg->doorbell = IPR_DOORBELL; in ipr_init_ioa_cfg()
9948 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); in ipr_init_ioa_cfg()
9949 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); in ipr_init_ioa_cfg()
9950 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); in ipr_init_ioa_cfg()
9951 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); in ipr_init_ioa_cfg()
9952 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); in ipr_init_ioa_cfg()
9953 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); in ipr_init_ioa_cfg()
9955 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); in ipr_init_ioa_cfg()
9956 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); in ipr_init_ioa_cfg()
9957 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q); in ipr_init_ioa_cfg()
9958 INIT_LIST_HEAD(&ioa_cfg->free_res_q); in ipr_init_ioa_cfg()
9959 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in ipr_init_ioa_cfg()
9960 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); in ipr_init_ioa_cfg()
9961 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); in ipr_init_ioa_cfg()
9962 init_waitqueue_head(&ioa_cfg->reset_wait_q); in ipr_init_ioa_cfg()
9963 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_init_ioa_cfg()
9964 init_waitqueue_head(&ioa_cfg->eeh_wait_q); in ipr_init_ioa_cfg()
9965 ioa_cfg->sdt_state = INACTIVE; in ipr_init_ioa_cfg()
9968 ioa_cfg->max_devs_supported = ipr_max_devs; in ipr_init_ioa_cfg()
9970 if (ioa_cfg->sis64) { in ipr_init_ioa_cfg()
9971 host->max_channel = IPR_MAX_SIS64_BUSES; in ipr_init_ioa_cfg()
9972 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9973 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9975 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; in ipr_init_ioa_cfg()
9976 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) in ipr_init_ioa_cfg()
9977 + ((sizeof(struct ipr_config_table_entry64) in ipr_init_ioa_cfg()
9978 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9980 host->max_channel = IPR_VSET_BUS; in ipr_init_ioa_cfg()
9981 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; in ipr_init_ioa_cfg()
9982 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; in ipr_init_ioa_cfg()
9984 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; in ipr_init_ioa_cfg()
9985 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) in ipr_init_ioa_cfg()
9986 + ((sizeof(struct ipr_config_table_entry) in ipr_init_ioa_cfg()
9987 * ioa_cfg->max_devs_supported))); in ipr_init_ioa_cfg()
9990 host->unique_id = host->host_no; in ipr_init_ioa_cfg()
9991 host->max_cmd_len = IPR_MAX_CDB_LEN; in ipr_init_ioa_cfg()
9992 host->can_queue = ioa_cfg->max_cmds; in ipr_init_ioa_cfg()
9995 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { in ipr_init_ioa_cfg()
9996 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); in ipr_init_ioa_cfg()
9997 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); in ipr_init_ioa_cfg()
9998 spin_lock_init(&ioa_cfg->hrrq[i]._lock); in ipr_init_ioa_cfg()
10000 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; in ipr_init_ioa_cfg()
10002 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; in ipr_init_ioa_cfg()
10007 * ipr_get_chip_info - Find adapter chip information
10019 if (ipr_chip[i].vendor == dev_id->vendor && in ipr_get_chip_info()
10020 ipr_chip[i].device == dev_id->device) in ipr_get_chip_info()
10026 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10035 struct pci_dev *pdev = ioa_cfg->pdev; in ipr_wait_for_pci_err_recovery()
10038 wait_event_timeout(ioa_cfg->eeh_wait_q, in ipr_wait_for_pci_err_recovery()
10047 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1; in name_msi_vectors()
10049 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) { in name_msi_vectors()
10050 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n, in name_msi_vectors()
10051 "host%d-%d", ioa_cfg->host->host_no, vec_idx); in name_msi_vectors()
10052 ioa_cfg->vectors_info[vec_idx]. in name_msi_vectors()
10053 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0; in name_msi_vectors()
10062 for (i = 1; i < ioa_cfg->nvectors; i++) { in ipr_request_other_msi_irqs()
10063 rc = request_irq(pci_irq_vector(pdev, i), in ipr_request_other_msi_irqs()
10064 ipr_isr_mhrrq, in ipr_request_other_msi_irqs()
10065 0, in ipr_request_other_msi_irqs()
10066 ioa_cfg->vectors_info[i].desc, in ipr_request_other_msi_irqs()
10067 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10068 if (rc) { in ipr_request_other_msi_irqs()
10069 while (--i > 0) in ipr_request_other_msi_irqs()
10070 free_irq(pci_irq_vector(pdev, i), in ipr_request_other_msi_irqs()
10071 &ioa_cfg->hrrq[i]); in ipr_request_other_msi_irqs()
10079 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10087 * 0 on success / non-zero on failure
10095 dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq); in ipr_test_intr()
10096 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10098 ioa_cfg->msi_received = 1; in ipr_test_intr()
10099 wake_up(&ioa_cfg->msi_wait_q); in ipr_test_intr()
10101 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_intr()
10106 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10115 * 0 on success / non-zero on failure
10125 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10126 init_waitqueue_head(&ioa_cfg->msi_wait_q); in ipr_test_msi()
10127 ioa_cfg->msi_received = 0; in ipr_test_msi()
10129 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32); in ipr_test_msi()
10130 readl(ioa_cfg->regs.sense_interrupt_mask_reg); in ipr_test_msi()
10131 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10135 dev_err(&pdev->dev, "Cannot assign IRQ %d\n", irq); in ipr_test_msi()
10138 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq); in ipr_test_msi()
10140 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); in ipr_test_msi()
10141 readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_test_msi()
10142 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); in ipr_test_msi()
10143 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
10146 if (!ioa_cfg->msi_received) { in ipr_test_msi()
10148 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n"); in ipr_test_msi()
10149 rc = -EOPNOTSUPP; in ipr_test_msi()
10151 dev_info(&pdev->dev, "MSI test succeeded.\n"); in ipr_test_msi()
10153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_test_msi()
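/*
 * Editor's sketch, not part of ipr.c: the MSI self-test above in miniature.
 * Arm a flag, poke the adapter so it raises exactly one interrupt (the
 * IO_DEBUG_ACKNOWLEDGE write in the driver), and give the handler HZ
 * jiffies to set the flag; on timeout the caller falls back to legacy INTx.
 * struct example_dev and example_fire_test_irq() are hypothetical, and the
 * wait queue is assumed to have been set up with init_waitqueue_head().
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_dev {
	wait_queue_head_t wq;
	bool irq_seen;		/* set by the interrupt handler */
};

void example_fire_test_irq(struct example_dev *dev);	/* hypothetical */

static int example_msi_self_test(struct example_dev *dev)
{
	dev->irq_seen = false;
	example_fire_test_irq(dev);	/* handler sets irq_seen, wakes wq */
	if (!wait_event_timeout(dev->wq, dev->irq_seen, HZ))
		return -EOPNOTSUPP;	/* caller re-requests the line as INTx */
	return 0;
}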
10162 * ipr_probe_ioa - Allocates memory and does first stage of initialization
10167 * 0 on success / non-zero on failure
10183 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); in ipr_probe_ioa()
10187 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); in ipr_probe_ioa()
10188 rc = -ENOMEM; in ipr_probe_ioa()
10192 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; in ipr_probe_ioa()
10194 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops); in ipr_probe_ioa()
10196 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); in ipr_probe_ioa()
10198 if (!ioa_cfg->ipr_chip) { in ipr_probe_ioa()
10199 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n", in ipr_probe_ioa()
10200 dev_id->vendor, dev_id->device); in ipr_probe_ioa()
10205 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; in ipr_probe_ioa()
10206 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; in ipr_probe_ioa()
10207 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr; in ipr_probe_ioa()
10208 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds; in ipr_probe_ioa()
10211 ioa_cfg->transop_timeout = ipr_transop_timeout; in ipr_probe_ioa()
10212 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT) in ipr_probe_ioa()
10213 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10215 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT; in ipr_probe_ioa()
10217 ioa_cfg->revid = pdev->revision; in ipr_probe_ioa()
10225 dev_err(&pdev->dev, in ipr_probe_ioa()
10226 "Couldn't register memory range of registers\n"); in ipr_probe_ioa()
10239 dev_err(&pdev->dev, "Cannot enable adapter\n"); in ipr_probe_ioa()
10248 dev_err(&pdev->dev, in ipr_probe_ioa()
10249 "Couldn't map memory range of registers\n"); in ipr_probe_ioa()
10250 rc = -ENOMEM; in ipr_probe_ioa()
10254 ioa_cfg->hdw_dma_regs = ipr_regs; in ipr_probe_ioa()
10255 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; in ipr_probe_ioa()
10256 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; in ipr_probe_ioa()
10260 if (ioa_cfg->sis64) { in ipr_probe_ioa()
10261 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ipr_probe_ioa()
10263 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n"); in ipr_probe_ioa()
10264 rc = dma_set_mask_and_coherent(&pdev->dev, in ipr_probe_ioa()
10268 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in ipr_probe_ioa()
10271 dev_err(&pdev->dev, "Failed to set DMA mask\n"); in ipr_probe_ioa()
10276 ioa_cfg->chip_cfg->cache_line_size); in ipr_probe_ioa()
10279 dev_err(&pdev->dev, "Write of cache line size failed\n"); in ipr_probe_ioa()
10281 rc = -EIO; in ipr_probe_ioa()
10286 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); in ipr_probe_ioa()
10290 dev_err(&pdev->dev, "The max number of MSI-X vectors is %d\n", in ipr_probe_ioa()
10296 if (ioa_cfg->ipr_chip->has_msi) in ipr_probe_ioa()
10303 ioa_cfg->nvectors = rc; in ipr_probe_ioa()
10305 if (!pdev->msi_enabled && !pdev->msix_enabled) in ipr_probe_ioa()
10306 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10314 rc = -EIO; in ipr_probe_ioa()
10319 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10323 dev_info(&pdev->dev, in ipr_probe_ioa()
10324 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors, in ipr_probe_ioa()
10325 pdev->msix_enabled ? "-X" : ""); in ipr_probe_ioa()
10327 case -EOPNOTSUPP: in ipr_probe_ioa()
10331 ioa_cfg->nvectors = 1; in ipr_probe_ioa()
10332 ioa_cfg->clear_isr = 1; in ipr_probe_ioa()
10339 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors, in ipr_probe_ioa()
10351 dev_err(&pdev->dev, in ipr_probe_ioa()
10352 "Couldn't save/restore PCI-X command register\n"); in ipr_probe_ioa()
10356 /* Save away PCI config space for use following IOA reset */ in ipr_probe_ioa()
10360 dev_err(&pdev->dev, "Failed to save PCI config space\n"); in ipr_probe_ioa()
10361 rc = -EIO; in ipr_probe_ioa()
10369 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); in ipr_probe_ioa()
10370 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); in ipr_probe_ioa()
10371 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); in ipr_probe_ioa()
10373 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10375 ioa_cfg->needs_hard_reset = 1; in ipr_probe_ioa()
10377 ioa_cfg->ioa_unit_checked = 1; in ipr_probe_ioa()
10379 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10381 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_probe_ioa()
10383 if (pdev->msi_enabled || pdev->msix_enabled) { in ipr_probe_ioa()
10384 name_msi_vectors(ioa_cfg); in ipr_probe_ioa()
10385 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0, in ipr_probe_ioa()
10386 ioa_cfg->vectors_info[0].desc, in ipr_probe_ioa()
10387 &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10391 rc = request_irq(pdev->irq, ipr_isr, in ipr_probe_ioa()
10392 IRQF_SHARED, in ipr_probe_ioa()
10393 IPR_NAME, &ioa_cfg->hrrq[0]); in ipr_probe_ioa()
10396 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", in ipr_probe_ioa()
10397 pdev->irq, rc); in ipr_probe_ioa()
10401 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) || in ipr_probe_ioa()
10402 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) { in ipr_probe_ioa()
10403 ioa_cfg->needs_warm_reset = 1; in ipr_probe_ioa()
10404 ioa_cfg->reset = ipr_reset_slot_reset; in ipr_probe_ioa()
10406 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d", in ipr_probe_ioa()
10407 WQ_MEM_RECLAIM, host->host_no); in ipr_probe_ioa()
10409 if (!ioa_cfg->reset_work_q) { in ipr_probe_ioa()
10410 dev_err(&pdev->dev, "Couldn't register reset workqueue\n"); in ipr_probe_ioa()
10411 rc = -ENOMEM; in ipr_probe_ioa()
10415 ioa_cfg->reset = ipr_reset_start_bist; in ipr_probe_ioa()
10418 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); in ipr_probe_ioa()
10444 * ipr_initiate_ioa_bringdown - Bring down an adapter
10461 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) in ipr_initiate_ioa_bringdown()
10462 ioa_cfg->sdt_state = ABORT_DUMP; in ipr_initiate_ioa_bringdown()
10463 ioa_cfg->reset_retries = 0; in ipr_initiate_ioa_bringdown()
10464 ioa_cfg->in_ioa_bringdown = 1; in ipr_initiate_ioa_bringdown()
10470 * __ipr_remove - Remove a single adapter
10486 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10487 while (ioa_cfg->in_reset_reload) { in __ipr_remove()
10488 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10489 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10490 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10493 for (i = 0; i < ioa_cfg->hrrq_num; i++) { in __ipr_remove()
10494 spin_lock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10495 ioa_cfg->hrrq[i].removing_ioa = 1; in __ipr_remove()
10496 spin_unlock(&ioa_cfg->hrrq[i]._lock); in __ipr_remove()
10501 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10502 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in __ipr_remove()
10503 flush_work(&ioa_cfg->work_q); in __ipr_remove()
10504 if (ioa_cfg->reset_work_q) in __ipr_remove()
10505 flush_workqueue(ioa_cfg->reset_work_q); in __ipr_remove()
10506 INIT_LIST_HEAD(&ioa_cfg->used_res_q); in __ipr_remove()
10507 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10510 list_del(&ioa_cfg->queue); in __ipr_remove()
10513 if (ioa_cfg->sdt_state == ABORT_DUMP) in __ipr_remove()
10514 ioa_cfg->sdt_state = WAIT_FOR_DUMP; in __ipr_remove()
10515 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); in __ipr_remove()
10523 * ipr_remove - IOA hot plug remove entry point
10537 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10539 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10541 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_remove()
10543 scsi_remove_host(ioa_cfg->host); in ipr_remove()
10551 * ipr_probe - Adapter hot plug add entry point
10556 * 0 on success / non-zero on failure
10577 rc = scsi_add_host(ioa_cfg->host, &pdev->dev); in ipr_probe()
10584 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10588 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10593 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10597 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10599 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10601 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10606 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10610 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10612 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, in ipr_probe()
10614 scsi_remove_host(ioa_cfg->host); in ipr_probe()
10618 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_probe()
10619 ioa_cfg->scan_enabled = 1; in ipr_probe()
10620 schedule_work(&ioa_cfg->work_q); in ipr_probe()
10621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_probe()
10623 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; in ipr_probe()
10625 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_probe()
10626 for (i = 1; i < ioa_cfg->hrrq_num; i++) { in ipr_probe()
10627 irq_poll_init(&ioa_cfg->hrrq[i].iopoll, in ipr_probe()
10628 ioa_cfg->iopoll_weight, ipr_iopoll); in ipr_probe()
10632 scsi_scan_host(ioa_cfg->host); in ipr_probe()
10638 * ipr_shutdown - Shutdown handler.
10654 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10655 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { in ipr_shutdown()
10656 ioa_cfg->iopoll_weight = 0; in ipr_shutdown()
10657 for (i = 1; i < ioa_cfg->hrrq_num; i++) in ipr_shutdown()
10658 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll); in ipr_shutdown()
10661 while (ioa_cfg->in_reset_reload) { in ipr_shutdown()
10662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10663 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10664 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10667 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) in ipr_shutdown()
10671 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); in ipr_shutdown()
10672 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); in ipr_shutdown()
10673 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) { in ipr_shutdown()
10675 pci_disable_device(ioa_cfg->pdev); in ipr_shutdown()
10809 * ipr_halt_done - Shutdown prepare completion
10817 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); in ipr_halt_done()
10821 * ipr_halt - Issue shutdown prepare to all adapters
10841 spin_lock_irqsave(ioa_cfg->host->host_lock, flags); in ipr_halt()
10842 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || in ipr_halt()
10843 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) { in ipr_halt()
10844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
10849 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); in ipr_halt()
10850 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; in ipr_halt()
10851 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; in ipr_halt()
10852 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; in ipr_halt()
10855 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); in ipr_halt()
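/*
 * Editor's sketch, not shown in this excerpt: ipr_halt() is a
 * reboot-notifier callback, and the driver registers it on the kernel's
 * reboot notifier chain along these lines (the notifier_block name here is
 * illustrative, not the driver's own).
 */
#include <linux/notifier.h>
#include <linux/reboot.h>

static struct notifier_block example_reboot_nb = {
	.notifier_call = ipr_halt,	/* invoked with SYS_RESTART/SYS_HALT/SYS_POWER_OFF */
};

/* module init/exit would call:
 *	register_reboot_notifier(&example_reboot_nb);
 *	unregister_reboot_notifier(&example_reboot_nb);
 */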
10867 * ipr_init - Module entry point
10890 * ipr_exit - Module unload