Lines Matching refs:hba

392 static struct status_msg *stex_get_status(struct st_hba *hba)  in stex_get_status()  argument
394 struct status_msg *status = hba->status_buffer + hba->status_tail; in stex_get_status()
396 ++hba->status_tail; in stex_get_status()
397 hba->status_tail %= hba->sts_count+1; in stex_get_status()
410 static struct req_msg *stex_alloc_req(struct st_hba *hba) in stex_alloc_req() argument
412 struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size; in stex_alloc_req()
414 ++hba->req_head; in stex_alloc_req()
415 hba->req_head %= hba->rq_count+1; in stex_alloc_req()
420 static struct req_msg *stex_ss_alloc_req(struct st_hba *hba) in stex_ss_alloc_req() argument
422 return (struct req_msg *)(hba->dma_mem + in stex_ss_alloc_req()
423 hba->req_head * hba->rq_size + sizeof(struct st_msg_header)); in stex_ss_alloc_req()
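The matches from stex_get_status() and stex_alloc_req()/stex_ss_alloc_req() above all advance a ring index with the same "++idx; idx %= count+1;" wrap rule, so the request and status rings effectively have count+1 slots. A minimal standalone sketch of that wrap-around arithmetic follows (not driver code; the function and variable names here are illustrative only):

    #include <stdio.h>

    /* Advance a ring index the same way the driver advances
     * hba->req_head and hba->status_tail: one step forward,
     * wrapping over count+1 slots.
     */
    static unsigned int ring_advance(unsigned int idx, unsigned int count)
    {
            ++idx;
            idx %= count + 1;
            return idx;
    }

    int main(void)
    {
            unsigned int head = 0, rq_count = 3;    /* 4 slots: 0..3 */
            int i;

            for (i = 0; i < 6; i++) {
                    printf("req_head -> %u\n", head);
                    head = ring_advance(head, rq_count);
            }
            return 0;       /* prints 0 1 2 3 0 1 */
    }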
426 static int stex_map_sg(struct st_hba *hba, in stex_map_sg() argument
443 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); in stex_map_sg()
458 static int stex_ss_map_sg(struct st_hba *hba, in stex_ss_map_sg() argument
475 dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize); in stex_ss_map_sg()
491 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) in stex_controller_info() argument
496 p = hba->copy_buffer; in stex_controller_info()
499 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); in stex_controller_info()
507 p->bus = hba->pdev->bus->number; in stex_controller_info()
508 p->slot = hba->pdev->devfn; in stex_controller_info()
510 p->irq_vec = hba->pdev->irq; in stex_controller_info()
511 p->id = hba->pdev->vendor << 16 | hba->pdev->device; in stex_controller_info()
513 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; in stex_controller_info()
519 stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) in stex_send_cmd() argument
523 hba->ccb[tag].req = req; in stex_send_cmd()
524 hba->out_req_cnt++; in stex_send_cmd()
526 writel(hba->req_head, hba->mmio_base + IMR0); in stex_send_cmd()
527 writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL); in stex_send_cmd()
528 readl(hba->mmio_base + IDBL); /* flush */ in stex_send_cmd()
532 stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) in stex_ss_send_cmd() argument
540 hba->ccb[tag].req = req; in stex_ss_send_cmd()
541 hba->out_req_cnt++; in stex_ss_send_cmd()
543 cmd = hba->ccb[tag].cmd; in stex_ss_send_cmd()
549 addr = hba->dma_handle + hba->req_head * hba->rq_size; in stex_ss_send_cmd()
550 addr += (hba->ccb[tag].sg_count+4)/11; in stex_ss_send_cmd()
553 ++hba->req_head; in stex_ss_send_cmd()
554 hba->req_head %= hba->rq_count+1; in stex_ss_send_cmd()
555 if (hba->cardtype == st_P3) { in stex_ss_send_cmd()
556 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); in stex_ss_send_cmd()
557 writel(addr, hba->mmio_base + YH2I_REQ); in stex_ss_send_cmd()
559 writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); in stex_ss_send_cmd()
560 readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ in stex_ss_send_cmd()
561 writel(addr, hba->mmio_base + YH2I_REQ); in stex_ss_send_cmd()
562 readl(hba->mmio_base + YH2I_REQ); /* flush */ in stex_ss_send_cmd()
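In the stex_ss_send_cmd() matches above, the request's bus address is posted to the controller as two 32-bit halves: "(addr >> 16) >> 16" picks out the upper half in a form that stays well-defined even if the DMA address type is only 32 bits wide, and writel(addr, ...) carries the lower half. In the st_yel branch each writel is followed by a readl of the same register (the "/* flush */" lines), which pushes the posted MMIO write out before continuing, while the st_P3 branch writes both halves without read-backs. A standalone sketch of just the address split (not driver code; the register names are taken from the matches above, the address value is illustrative):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x12345678abcd0000ULL;          /* example DMA handle */
            uint32_t hi = (uint32_t)((addr >> 16) >> 16);   /* upper 32 bits */
            uint32_t lo = (uint32_t)addr;                   /* lower 32 bits */

            printf("YH2I_REQ_HI <- 0x%08" PRIx32 ", YH2I_REQ <- 0x%08" PRIx32 "\n",
                   hi, lo);
            return 0;
    }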
566 static void return_abnormal_state(struct st_hba *hba, int status) in return_abnormal_state() argument
572 spin_lock_irqsave(hba->host->host_lock, flags); in return_abnormal_state()
573 for (tag = 0; tag < hba->host->can_queue; tag++) { in return_abnormal_state()
574 ccb = &hba->ccb[tag]; in return_abnormal_state()
585 spin_unlock_irqrestore(hba->host->host_lock, flags); in return_abnormal_state()
600 struct st_hba *hba; in stex_queuecommand_lck() local
609 hba = (struct st_hba *) &host->hostdata[0]; in stex_queuecommand_lck()
610 if (hba->mu_status == MU_STATE_NOCONNECT) { in stex_queuecommand_lck()
615 if (unlikely(hba->mu_status != MU_STATE_STARTED)) in stex_queuecommand_lck()
641 if (hba->cardtype == st_shasta || id == host->max_id - 1) { in stex_queuecommand_lck()
679 .host_no = hba->host->host_no, in stex_queuecommand_lck()
703 req = hba->alloc_rq(hba); in stex_queuecommand_lck()
718 hba->ccb[tag].cmd = cmd; in stex_queuecommand_lck()
719 hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE; in stex_queuecommand_lck()
720 hba->ccb[tag].sense_buffer = cmd->sense_buffer; in stex_queuecommand_lck()
722 if (!hba->map_sg(hba, req, &hba->ccb[tag])) { in stex_queuecommand_lck()
723 hba->ccb[tag].sg_count = 0; in stex_queuecommand_lck()
727 hba->send(hba, req, tag); in stex_queuecommand_lck()
790 static void stex_check_cmd(struct st_hba *hba, in stex_check_cmd() argument
799 static void stex_mu_intr(struct st_hba *hba, u32 doorbell) in stex_mu_intr() argument
801 void __iomem *base = hba->mmio_base; in stex_mu_intr()
811 hba->status_head = readl(base + OMR1); in stex_mu_intr()
812 if (unlikely(hba->status_head > hba->sts_count)) { in stex_mu_intr()
814 pci_name(hba->pdev)); in stex_mu_intr()
826 if (unlikely(hba->out_req_cnt <= 0 || in stex_mu_intr()
827 (hba->mu_status == MU_STATE_RESETTING && in stex_mu_intr()
828 hba->cardtype != st_yosemite))) { in stex_mu_intr()
829 hba->status_tail = hba->status_head; in stex_mu_intr()
833 while (hba->status_tail != hba->status_head) { in stex_mu_intr()
834 resp = stex_get_status(hba); in stex_mu_intr()
836 if (unlikely(tag >= hba->host->can_queue)) { in stex_mu_intr()
838 "(%s): invalid tag\n", pci_name(hba->pdev)); in stex_mu_intr()
842 hba->out_req_cnt--; in stex_mu_intr()
843 ccb = &hba->ccb[tag]; in stex_mu_intr()
844 if (unlikely(hba->wait_ccb == ccb)) in stex_mu_intr()
845 hba->wait_ccb = NULL; in stex_mu_intr()
848 "(%s): lagging req\n", pci_name(hba->pdev)); in stex_mu_intr()
856 pci_name(hba->pdev)); in stex_mu_intr()
868 if (hba->cardtype == st_yosemite) in stex_mu_intr()
869 stex_check_cmd(hba, ccb, resp); in stex_mu_intr()
873 stex_controller_info(hba, ccb); in stex_mu_intr()
882 writel(hba->status_head, base + IMR1); in stex_mu_intr()
888 struct st_hba *hba = __hba; in stex_intr() local
889 void __iomem *base = hba->mmio_base; in stex_intr()
893 spin_lock_irqsave(hba->host->host_lock, flags); in stex_intr()
901 stex_mu_intr(hba, data); in stex_intr()
902 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_intr()
904 hba->cardtype == st_shasta)) in stex_intr()
905 queue_work(hba->work_q, &hba->reset_work); in stex_intr()
909 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_intr()
914 static void stex_ss_mu_intr(struct st_hba *hba) in stex_ss_mu_intr() argument
924 if (unlikely(hba->out_req_cnt <= 0 || in stex_ss_mu_intr()
925 hba->mu_status == MU_STATE_RESETTING)) in stex_ss_mu_intr()
928 while (count < hba->sts_count) { in stex_ss_mu_intr()
929 scratch = hba->scratch + hba->status_tail; in stex_ss_mu_intr()
934 resp = hba->status_buffer + hba->status_tail; in stex_ss_mu_intr()
937 ++hba->status_tail; in stex_ss_mu_intr()
938 hba->status_tail %= hba->sts_count+1; in stex_ss_mu_intr()
941 if (unlikely(tag >= hba->host->can_queue)) { in stex_ss_mu_intr()
943 "(%s): invalid tag\n", pci_name(hba->pdev)); in stex_ss_mu_intr()
947 hba->out_req_cnt--; in stex_ss_mu_intr()
948 ccb = &hba->ccb[tag]; in stex_ss_mu_intr()
949 if (unlikely(hba->wait_ccb == ccb)) in stex_ss_mu_intr()
950 hba->wait_ccb = NULL; in stex_ss_mu_intr()
953 "(%s): lagging req\n", pci_name(hba->pdev)); in stex_ss_mu_intr()
969 pci_name(hba->pdev)); in stex_ss_mu_intr()
976 stex_check_cmd(hba, ccb, resp); in stex_ss_mu_intr()
989 struct st_hba *hba = __hba; in stex_ss_intr() local
990 void __iomem *base = hba->mmio_base; in stex_ss_intr()
994 spin_lock_irqsave(hba->host->host_lock, flags); in stex_ss_intr()
996 if (hba->cardtype == st_yel) { in stex_ss_intr()
1001 stex_ss_mu_intr(hba); in stex_ss_intr()
1002 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_ss_intr()
1004 queue_work(hba->work_q, &hba->reset_work); in stex_ss_intr()
1015 stex_ss_mu_intr(hba); in stex_ss_intr()
1016 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_ss_intr()
1018 queue_work(hba->work_q, &hba->reset_work); in stex_ss_intr()
1023 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_ss_intr()
1028 static int stex_common_handshake(struct st_hba *hba) in stex_common_handshake() argument
1030 void __iomem *base = hba->mmio_base; in stex_common_handshake()
1044 pci_name(hba->pdev)); in stex_common_handshake()
1057 if (hba->host->can_queue > data) { in stex_common_handshake()
1058 hba->host->can_queue = data; in stex_common_handshake()
1059 hba->host->cmd_per_lun = data; in stex_common_handshake()
1063 h = (struct handshake_frame *)hba->status_buffer; in stex_common_handshake()
1064 h->rb_phy = cpu_to_le64(hba->dma_handle); in stex_common_handshake()
1065 h->req_sz = cpu_to_le16(hba->rq_size); in stex_common_handshake()
1066 h->req_cnt = cpu_to_le16(hba->rq_count+1); in stex_common_handshake()
1068 h->status_cnt = cpu_to_le16(hba->sts_count+1); in stex_common_handshake()
1071 if (hba->extra_offset) { in stex_common_handshake()
1072 h->extra_offset = cpu_to_le32(hba->extra_offset); in stex_common_handshake()
1073 h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset); in stex_common_handshake()
1077 status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size; in stex_common_handshake()
1094 pci_name(hba->pdev)); in stex_common_handshake()
1112 static int stex_ss_handshake(struct st_hba *hba) in stex_ss_handshake() argument
1114 void __iomem *base = hba->mmio_base; in stex_ss_handshake()
1124 if (hba->cardtype == st_yel) { in stex_ss_handshake()
1130 pci_name(hba->pdev)); in stex_ss_handshake()
1142 pci_name(hba->pdev)); in stex_ss_handshake()
1150 msg_h = (struct st_msg_header *)hba->dma_mem; in stex_ss_handshake()
1151 msg_h->handle = cpu_to_le64(hba->dma_handle); in stex_ss_handshake()
1155 h->rb_phy = cpu_to_le64(hba->dma_handle); in stex_ss_handshake()
1156 h->req_sz = cpu_to_le16(hba->rq_size); in stex_ss_handshake()
1157 h->req_cnt = cpu_to_le16(hba->rq_count+1); in stex_ss_handshake()
1159 h->status_cnt = cpu_to_le16(hba->sts_count+1); in stex_ss_handshake()
1163 scratch_size = (hba->sts_count+1)*sizeof(u32); in stex_ss_handshake()
1166 if (hba->cardtype == st_yel) { in stex_ss_handshake()
1170 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); in stex_ss_handshake()
1172 writel(hba->dma_handle, base + YH2I_REQ); in stex_ss_handshake()
1179 if (hba->msi_lock == 0) { in stex_ss_handshake()
1182 hba->msi_lock = 1; in stex_ss_handshake()
1184 writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); in stex_ss_handshake()
1185 writel(hba->dma_handle, base + YH2I_REQ); in stex_ss_handshake()
1189 scratch = hba->scratch; in stex_ss_handshake()
1190 if (hba->cardtype == st_yel) { in stex_ss_handshake()
1195 pci_name(hba->pdev)); in stex_ss_handshake()
1208 pci_name(hba->pdev)); in stex_ss_handshake()
1223 static int stex_handshake(struct st_hba *hba) in stex_handshake() argument
1229 if (hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_handshake()
1230 err = stex_ss_handshake(hba); in stex_handshake()
1232 err = stex_common_handshake(hba); in stex_handshake()
1233 spin_lock_irqsave(hba->host->host_lock, flags); in stex_handshake()
1234 mu_status = hba->mu_status; in stex_handshake()
1236 hba->req_head = 0; in stex_handshake()
1237 hba->req_tail = 0; in stex_handshake()
1238 hba->status_head = 0; in stex_handshake()
1239 hba->status_tail = 0; in stex_handshake()
1240 hba->out_req_cnt = 0; in stex_handshake()
1241 hba->mu_status = MU_STATE_STARTED; in stex_handshake()
1243 hba->mu_status = MU_STATE_FAILED; in stex_handshake()
1245 wake_up_all(&hba->reset_waitq); in stex_handshake()
1246 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_handshake()
1253 struct st_hba *hba = (struct st_hba *)host->hostdata; in stex_abort() local
1262 base = hba->mmio_base; in stex_abort()
1265 hba->ccb[tag].req && hba->ccb[tag].cmd == cmd) in stex_abort()
1266 hba->wait_ccb = &hba->ccb[tag]; in stex_abort()
1270 if (hba->cardtype == st_yel) { in stex_abort()
1276 stex_ss_mu_intr(hba); in stex_abort()
1277 } else if (hba->cardtype == st_P3) { in stex_abort()
1285 stex_ss_mu_intr(hba); in stex_abort()
1293 stex_mu_intr(hba, data); in stex_abort()
1295 if (hba->wait_ccb == NULL) { in stex_abort()
1297 "(%s): lost interrupt\n", pci_name(hba->pdev)); in stex_abort()
1303 hba->wait_ccb->req = NULL; /* nullify the req's future return */ in stex_abort()
1304 hba->wait_ccb = NULL; in stex_abort()
1311 static void stex_hard_reset(struct st_hba *hba) in stex_hard_reset() argument
1319 pci_read_config_dword(hba->pdev, i * 4, in stex_hard_reset()
1320 &hba->pdev->saved_config_space[i]); in stex_hard_reset()
1324 bus = hba->pdev->bus; in stex_hard_reset()
1338 pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd); in stex_hard_reset()
1346 pci_write_config_dword(hba->pdev, i * 4, in stex_hard_reset()
1347 hba->pdev->saved_config_space[i]); in stex_hard_reset()
1350 static int stex_yos_reset(struct st_hba *hba) in stex_yos_reset() argument
1356 base = hba->mmio_base; in stex_yos_reset()
1360 while (hba->out_req_cnt > 0) { in stex_yos_reset()
1363 "(%s): reset timeout\n", pci_name(hba->pdev)); in stex_yos_reset()
1370 spin_lock_irqsave(hba->host->host_lock, flags); in stex_yos_reset()
1372 hba->mu_status = MU_STATE_FAILED; in stex_yos_reset()
1374 hba->mu_status = MU_STATE_STARTED; in stex_yos_reset()
1375 wake_up_all(&hba->reset_waitq); in stex_yos_reset()
1376 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_yos_reset()
1381 static void stex_ss_reset(struct st_hba *hba) in stex_ss_reset() argument
1383 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); in stex_ss_reset()
1384 readl(hba->mmio_base + YH2I_INT); in stex_ss_reset()
1388 static void stex_p3_reset(struct st_hba *hba) in stex_p3_reset() argument
1390 writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT); in stex_p3_reset()
1394 static int stex_do_reset(struct st_hba *hba) in stex_do_reset() argument
1399 spin_lock_irqsave(hba->host->host_lock, flags); in stex_do_reset()
1400 if (hba->mu_status == MU_STATE_STARTING) { in stex_do_reset()
1401 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1403 pci_name(hba->pdev)); in stex_do_reset()
1406 while (hba->mu_status == MU_STATE_RESETTING) { in stex_do_reset()
1407 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1408 wait_event_timeout(hba->reset_waitq, in stex_do_reset()
1409 hba->mu_status != MU_STATE_RESETTING, in stex_do_reset()
1411 spin_lock_irqsave(hba->host->host_lock, flags); in stex_do_reset()
1412 mu_status = hba->mu_status; in stex_do_reset()
1416 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1420 hba->mu_status = MU_STATE_RESETTING; in stex_do_reset()
1421 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_do_reset()
1423 if (hba->cardtype == st_yosemite) in stex_do_reset()
1424 return stex_yos_reset(hba); in stex_do_reset()
1426 if (hba->cardtype == st_shasta) in stex_do_reset()
1427 stex_hard_reset(hba); in stex_do_reset()
1428 else if (hba->cardtype == st_yel) in stex_do_reset()
1429 stex_ss_reset(hba); in stex_do_reset()
1430 else if (hba->cardtype == st_P3) in stex_do_reset()
1431 stex_p3_reset(hba); in stex_do_reset()
1433 return_abnormal_state(hba, DID_RESET); in stex_do_reset()
1435 if (stex_handshake(hba) == 0) in stex_do_reset()
1439 pci_name(hba->pdev)); in stex_do_reset()
1445 struct st_hba *hba; in stex_reset() local
1447 hba = (struct st_hba *) &cmd->device->host->hostdata[0]; in stex_reset()
1452 return stex_do_reset(hba) ? FAILED : SUCCESS; in stex_reset()
1457 struct st_hba *hba = container_of(work, struct st_hba, reset_work); in stex_reset_work() local
1459 stex_do_reset(hba); in stex_reset_work()
1620 static int stex_request_irq(struct st_hba *hba) in stex_request_irq() argument
1622 struct pci_dev *pdev = hba->pdev; in stex_request_irq()
1625 if (msi || hba->cardtype == st_P3) { in stex_request_irq()
1632 hba->msi_enabled = 1; in stex_request_irq()
1634 hba->msi_enabled = 0; in stex_request_irq()
1637 (hba->cardtype == st_yel || hba->cardtype == st_P3) ? in stex_request_irq()
1638 stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba); in stex_request_irq()
1641 if (hba->msi_enabled) in stex_request_irq()
1647 static void stex_free_irq(struct st_hba *hba) in stex_free_irq() argument
1649 struct pci_dev *pdev = hba->pdev; in stex_free_irq()
1651 free_irq(pdev->irq, hba); in stex_free_irq()
1652 if (hba->msi_enabled) in stex_free_irq()
1658 struct st_hba *hba; in stex_probe() local
1682 hba = (struct st_hba *)host->hostdata; in stex_probe()
1683 memset(hba, 0, sizeof(struct st_hba)); in stex_probe()
1692 hba->mmio_base = pci_ioremap_bar(pdev, 0); in stex_probe()
1693 if ( !hba->mmio_base) { in stex_probe()
1709 hba->cardtype = (unsigned int) id->driver_data; in stex_probe()
1710 ci = &stex_card_info[hba->cardtype]; in stex_probe()
1726 if (hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_probe()
1727 hba->supports_pm = 1; in stex_probe()
1731 if (hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_probe()
1734 hba->dma_size = cp_offset + sizeof(struct st_frame); in stex_probe()
1735 if (hba->cardtype == st_seq || in stex_probe()
1736 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { in stex_probe()
1737 hba->extra_offset = hba->dma_size; in stex_probe()
1738 hba->dma_size += ST_ADDITIONAL_MEM; in stex_probe()
1740 hba->dma_mem = dma_alloc_coherent(&pdev->dev, in stex_probe()
1741 hba->dma_size, &hba->dma_handle, GFP_KERNEL); in stex_probe()
1742 if (!hba->dma_mem) { in stex_probe()
1744 if (hba->cardtype == st_seq || in stex_probe()
1745 (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) { in stex_probe()
1749 hba->dma_size = hba->extra_offset in stex_probe()
1751 hba->dma_mem = dma_alloc_coherent(&pdev->dev, in stex_probe()
1752 hba->dma_size, &hba->dma_handle, GFP_KERNEL); in stex_probe()
1755 if (!hba->dma_mem) { in stex_probe()
1763 hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL); in stex_probe()
1764 if (!hba->ccb) { in stex_probe()
1771 if (hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_probe()
1772 hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset); in stex_probe()
1773 hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset); in stex_probe()
1774 hba->copy_buffer = hba->dma_mem + cp_offset; in stex_probe()
1775 hba->rq_count = ci->rq_count; in stex_probe()
1776 hba->rq_size = ci->rq_size; in stex_probe()
1777 hba->sts_count = ci->sts_count; in stex_probe()
1778 hba->alloc_rq = ci->alloc_rq; in stex_probe()
1779 hba->map_sg = ci->map_sg; in stex_probe()
1780 hba->send = ci->send; in stex_probe()
1781 hba->mu_status = MU_STATE_STARTING; in stex_probe()
1782 hba->msi_lock = 0; in stex_probe()
1784 if (hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_probe()
1796 hba->host = host; in stex_probe()
1797 hba->pdev = pdev; in stex_probe()
1798 init_waitqueue_head(&hba->reset_waitq); in stex_probe()
1800 snprintf(hba->work_q_name, sizeof(hba->work_q_name), in stex_probe()
1802 hba->work_q = create_singlethread_workqueue(hba->work_q_name); in stex_probe()
1803 if (!hba->work_q) { in stex_probe()
1809 INIT_WORK(&hba->reset_work, stex_reset_work); in stex_probe()
1811 err = stex_request_irq(hba); in stex_probe()
1818 err = stex_handshake(hba); in stex_probe()
1822 pci_set_drvdata(pdev, hba); in stex_probe()
1836 stex_free_irq(hba); in stex_probe()
1838 destroy_workqueue(hba->work_q); in stex_probe()
1840 kfree(hba->ccb); in stex_probe()
1842 dma_free_coherent(&pdev->dev, hba->dma_size, in stex_probe()
1843 hba->dma_mem, hba->dma_handle); in stex_probe()
1845 iounmap(hba->mmio_base); in stex_probe()
1856 static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic) in stex_hba_stop() argument
1864 spin_lock_irqsave(hba->host->host_lock, flags); in stex_hba_stop()
1866 if ((hba->cardtype == st_yel || hba->cardtype == st_P3) && in stex_hba_stop()
1867 hba->supports_pm == 1) { in stex_hba_stop()
1869 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_hba_stop()
1873 req = hba->alloc_rq(hba); in stex_hba_stop()
1874 if (hba->cardtype == st_yel || hba->cardtype == st_P3) { in stex_hba_stop()
1876 memset(msg_h, 0, hba->rq_size); in stex_hba_stop()
1878 memset(req, 0, hba->rq_size); in stex_hba_stop()
1880 if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel in stex_hba_stop()
1881 || hba->cardtype == st_P3) in stex_hba_stop()
1887 } else if ((hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_hba_stop()
1899 hba->ccb[tag].cmd = NULL; in stex_hba_stop()
1900 hba->ccb[tag].sg_count = 0; in stex_hba_stop()
1901 hba->ccb[tag].sense_bufflen = 0; in stex_hba_stop()
1902 hba->ccb[tag].sense_buffer = NULL; in stex_hba_stop()
1903 hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE; in stex_hba_stop()
1904 hba->send(hba, req, tag); in stex_hba_stop()
1905 spin_unlock_irqrestore(hba->host->host_lock, flags); in stex_hba_stop()
1907 while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) { in stex_hba_stop()
1909 hba->ccb[tag].req_type = 0; in stex_hba_stop()
1910 hba->mu_status = MU_STATE_STOP; in stex_hba_stop()
1915 hba->mu_status = MU_STATE_STOP; in stex_hba_stop()
1918 static void stex_hba_free(struct st_hba *hba) in stex_hba_free() argument
1920 stex_free_irq(hba); in stex_hba_free()
1922 destroy_workqueue(hba->work_q); in stex_hba_free()
1924 iounmap(hba->mmio_base); in stex_hba_free()
1926 pci_release_regions(hba->pdev); in stex_hba_free()
1928 kfree(hba->ccb); in stex_hba_free()
1930 dma_free_coherent(&hba->pdev->dev, hba->dma_size, in stex_hba_free()
1931 hba->dma_mem, hba->dma_handle); in stex_hba_free()
1936 struct st_hba *hba = pci_get_drvdata(pdev); in stex_remove() local
1938 hba->mu_status = MU_STATE_NOCONNECT; in stex_remove()
1939 return_abnormal_state(hba, DID_NO_CONNECT); in stex_remove()
1940 scsi_remove_host(hba->host); in stex_remove()
1942 scsi_block_requests(hba->host); in stex_remove()
1944 stex_hba_free(hba); in stex_remove()
1946 scsi_host_put(hba->host); in stex_remove()
1955 struct st_hba *hba = pci_get_drvdata(pdev); in stex_shutdown() local
1957 if (hba->supports_pm == 0) { in stex_shutdown()
1958 stex_hba_stop(hba, ST_IGNORED); in stex_shutdown()
1959 } else if (hba->supports_pm == 1 && S6flag) { in stex_shutdown()
1961 stex_hba_stop(hba, ST_S6); in stex_shutdown()
1963 stex_hba_stop(hba, ST_S5); in stex_shutdown()
1966 static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state) in stex_choice_sleep_mic() argument
1972 hba->msi_lock = 0; in stex_choice_sleep_mic()
1981 struct st_hba *hba = pci_get_drvdata(pdev); in stex_suspend() local
1983 if ((hba->cardtype == st_yel || hba->cardtype == st_P3) in stex_suspend()
1984 && hba->supports_pm == 1) in stex_suspend()
1985 stex_hba_stop(hba, stex_choice_sleep_mic(hba, state)); in stex_suspend()
1987 stex_hba_stop(hba, ST_IGNORED); in stex_suspend()
1993 struct st_hba *hba = pci_get_drvdata(pdev); in stex_resume() local
1995 hba->mu_status = MU_STATE_STARTING; in stex_resume()
1996 stex_handshake(hba); in stex_resume()