
Selected lines from drivers/scsi/mpt3sas/mpt3sas_base.c

6  * Copyright (C) 2012-2014  LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
57 #include <linux/dma-mapping.h>
75 static int max_queue_depth = -1;
79 static int max_sgl_entries = -1;
83 static int msix_disable = -1;
91 static int max_msix_vectors = -1;
96 static int irqpoll_weight = -1;
103 " enable detection of firmware fault and halt firmware - (default=0)");
105 static int perf_mode = -1;
109 "0 - balanced: high iops mode is enabled &\n\t\t"
111 "1 - iops: high iops mode is disabled &\n\t\t"
113 "2 - latency: high iops mode is disabled &\n\t\t"
115 "\t\tdefault - default perf_mode is 'balanced'"
127 MPT_PERF_MODE_DEFAULT = -1,
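
The perf_mode values documented above (0 = balanced, 1 = iops, 2 = latency, default -1 = balanced) feed a driver enum of which only the default member shows up in this listing. A hedged sketch of how the remaining constants plausibly line up (illustrative names, not necessarily the driver's):

enum perf_mode_example {
	PERF_MODE_EXAMPLE_DEFAULT  = -1,	/* matches MPT_PERF_MODE_DEFAULT above */
	PERF_MODE_EXAMPLE_BALANCED =  0,	/* "balanced": high iops mode enabled */
	PERF_MODE_EXAMPLE_IOPS     =  1,	/* "iops": high iops mode disabled */
	PERF_MODE_EXAMPLE_LATENCY  =  2,	/* "latency": high iops mode disabled */
};
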
145 * mpt3sas_base_check_cmd_timeout - Function
173 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
192 ioc->fwfault_debug = mpt3sas_fwfault_debug; in _scsih_set_fwfault_debug()
200 * _base_readl_aero - retry readl for max three times.
240 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
256 u16 cmd_credit = ioc->facts.RequestCredit + 1; in _base_clone_reply_to_sys_mem()
257 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip + in _base_clone_reply_to_sys_mem()
259 (cmd_credit * ioc->request_sz) + (index * sizeof(u32)); in _base_clone_reply_to_sys_mem()
265 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
284 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
302 * _base_get_chain - Calculates and Returns virtual chain address
316 u16 cmd_credit = ioc->facts.RequestCredit + 1; in _base_get_chain()
318 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET + in _base_get_chain()
319 (cmd_credit * ioc->request_sz) + in _base_get_chain()
321 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth * in _base_get_chain()
322 ioc->request_sz) + (sge_chain_count * ioc->request_sz); in _base_get_chain()
327 * _base_get_chain_phys - Calculates and Returns physical address
342 u16 cmd_credit = ioc->facts.RequestCredit + 1; in _base_get_chain_phys()
344 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET + in _base_get_chain_phys()
345 (cmd_credit * ioc->request_sz) + in _base_get_chain_phys()
347 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth * in _base_get_chain_phys()
348 ioc->request_sz) + (sge_chain_count * ioc->request_sz); in _base_get_chain_phys()
353 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
366 u16 cmd_credit = ioc->facts.RequestCredit + 1; in _base_get_buffer_bar0()
370 ioc->facts.MaxChainDepth); in _base_get_buffer_bar0()
375 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
387 u16 cmd_credit = ioc->facts.RequestCredit + 1; in _base_get_buffer_phys_bar0()
390 ioc->facts.MaxChainDepth); in _base_get_buffer_phys_bar0()
395 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
412 for (index = 0; index < ioc->scsiio_depth; index++) { in _base_get_chain_buffer_dma_to_chain_buffer()
413 for (j = 0; j < ioc->chains_needed_per_io; j++) { in _base_get_chain_buffer_dma_to_chain_buffer()
414 ct = &ioc->chain_lookup[index].chains_per_smid[j]; in _base_get_chain_buffer_dma_to_chain_buffer()
415 if (ct && ct->chain_buffer_dma == chain_buffer_dma) in _base_get_chain_buffer_dma_to_chain_buffer()
416 return ct->chain_buffer; in _base_get_chain_buffer_dma_to_chain_buffer()
424 * _clone_sg_entries - MPI EP's scsiio and config requests
454 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) { in _clone_sg_entries()
457 sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL; in _clone_sg_entries()
459 } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { in _clone_sg_entries()
462 sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE; in _clone_sg_entries()
468 * address associated with sgel->Address. in _clone_sg_entries()
484 * 0 - 255 System register in _clone_sg_entries()
485 * 256 - 4352 MPI Frame. (This is based on maxCredit 32) in _clone_sg_entries()
486 * 4352 - 4864 Reply_free pool (512 byte is reserved in _clone_sg_entries()
490 * 4864 - 17152 SGE chain element. (32cmd * 3 chain of in _clone_sg_entries()
492 * 17152 - x Host buffer mapped with smid. in _clone_sg_entries()
505 if (le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
509 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) { in _clone_sg_entries()
512 (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT); in _clone_sg_entries()
519 * the virtual address for sgel->Address in _clone_sg_entries()
523 le32_to_cpu(sgel->Address)); in _clone_sg_entries()
538 sgel->Address = in _clone_sg_entries()
548 (le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
554 sgel->Address = in _clone_sg_entries()
558 ioc->config_vaddr, in _clone_sg_entries()
559 (le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
561 sgel->Address = in _clone_sg_entries()
565 buff_ptr += (le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
567 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
569 if ((le32_to_cpu(sgel->FlagsLength) & in _clone_sg_entries()
596 src_chain_addr[i], ioc->request_sz); in _clone_sg_entries()
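
The offsets quoted in the layout comment above (0-255 system registers, 256-4352 MPI frames, 4352-4864 reply free pool, 4864-17152 SGE chain elements, 17152 onward host buffers) follow from simple arithmetic once maxCredit = 32 and a 128-byte request frame are assumed, which is what makes the numbers line up. A hedged sketch, not driver code, with illustrative constant names:

enum {
	EP_SYS_REG_END    = 256,				/*     0 ..   255: system registers   */
	EP_MPI_FRAME_END  = EP_SYS_REG_END + 32 * 128,		/*   256 ..  4351: MPI request frames */
	EP_REPLY_FREE_END = EP_MPI_FRAME_END + 512,		/*  4352 ..  4863: reply free pool    */
	EP_CHAIN_END      = EP_REPLY_FREE_END + 32 * 3 * 128,	/*  4864 .. 17151: SGE chain elements */
	/* 17152 onward: host data buffers mapped per smid */
};
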
601 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
606 * -1 for other case.
614 return -1; in mpt3sas_remove_dead_ioc_func()
616 pdev = ioc->pdev; in mpt3sas_remove_dead_ioc_func()
618 return -1; in mpt3sas_remove_dead_ioc_func()
624 * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
638 mutex_lock(&ioc->scsih_cmds.mutex); in _base_sync_drv_fw_timestamp()
639 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { in _base_sync_drv_fw_timestamp()
643 ioc->scsih_cmds.status = MPT3_CMD_PENDING; in _base_sync_drv_fw_timestamp()
644 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); in _base_sync_drv_fw_timestamp()
647 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; in _base_sync_drv_fw_timestamp()
651 ioc->scsih_cmds.smid = smid; in _base_sync_drv_fw_timestamp()
653 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; in _base_sync_drv_fw_timestamp()
654 mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER; in _base_sync_drv_fw_timestamp()
655 mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP; in _base_sync_drv_fw_timestamp()
658 mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32); in _base_sync_drv_fw_timestamp()
659 mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); in _base_sync_drv_fw_timestamp()
660 init_completion(&ioc->scsih_cmds.done); in _base_sync_drv_fw_timestamp()
661 ioc->put_smid_default(ioc, smid); in _base_sync_drv_fw_timestamp()
665 wait_for_completion_timeout(&ioc->scsih_cmds.done, in _base_sync_drv_fw_timestamp()
667 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { in _base_sync_drv_fw_timestamp()
669 ioc->scsih_cmds.status, mpi_request, in _base_sync_drv_fw_timestamp()
673 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { in _base_sync_drv_fw_timestamp()
674 mpi_reply = ioc->scsih_cmds.reply; in _base_sync_drv_fw_timestamp()
677 le16_to_cpu(mpi_reply->IOCStatus), in _base_sync_drv_fw_timestamp()
678 le32_to_cpu(mpi_reply->IOCLogInfo))); in _base_sync_drv_fw_timestamp()
683 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; in _base_sync_drv_fw_timestamp()
685 mutex_unlock(&ioc->scsih_cmds.mutex); in _base_sync_drv_fw_timestamp()
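
The IO_UNIT_CONTROL request above carries a 64-bit timestamp split across two 32-bit little-endian fields: the upper half goes into Reserved7 and the lower half into IOCParameterValue. A minimal sketch of that split, assuming a millisecond timestamp and an illustrative helper name:

#include <linux/kernel.h>
#include <linux/types.h>

static void split_timestamp_example(u64 ts_ms, __le32 *hi, __le32 *lo)
{
	*hi = cpu_to_le32(upper_32_bits(ts_ms));	/* -> Reserved7 */
	*lo = cpu_to_le32(lower_32_bits(ts_ms));	/* -> IOCParameterValue */
}
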
689 * _base_fault_reset_work - workq handling ioc fault conditions
705 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
706 if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || in _base_fault_reset_work()
707 ioc->pci_error_recovery) in _base_fault_reset_work()
709 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
713 ioc_err(ioc, "SAS host is non-operational !!!!\n"); in _base_fault_reset_work()
717 * by considering controller is in a non-operational state. So in _base_fault_reset_work()
720 * controller to non-operational state and remove the dead ioc in _base_fault_reset_work()
723 if (ioc->non_operational_loop++ < 5) { in _base_fault_reset_work()
724 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, in _base_fault_reset_work()
737 ioc->schedule_dead_ioc_flush_running_cmds(ioc); in _base_fault_reset_work()
742 ioc->remove_host = 1; in _base_fault_reset_work()
745 "%s_dead_ioc_%d", ioc->driver_name, ioc->id); in _base_fault_reset_work()
756 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? in _base_fault_reset_work()
757 ioc->manu_pg11.CoreDumpTOSec : in _base_fault_reset_work()
762 if (ioc->ioc_coredump_loop == 0) { in _base_fault_reset_work()
767 &ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
768 ioc->shost_recovery = 1; in _base_fault_reset_work()
770 &ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
777 __func__, ioc->ioc_coredump_loop); in _base_fault_reset_work()
780 if (ioc->ioc_coredump_loop++ < timeout) { in _base_fault_reset_work()
782 &ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
787 if (ioc->ioc_coredump_loop) { in _base_fault_reset_work()
790 __func__, ioc->ioc_coredump_loop); in _base_fault_reset_work()
793 __func__, ioc->ioc_coredump_loop); in _base_fault_reset_work()
794 ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; in _base_fault_reset_work()
796 ioc->non_operational_loop = 0; in _base_fault_reset_work()
813 ioc->ioc_coredump_loop = 0; in _base_fault_reset_work()
814 if (ioc->time_sync_interval && in _base_fault_reset_work()
815 ++ioc->timestamp_update_count >= ioc->time_sync_interval) { in _base_fault_reset_work()
816 ioc->timestamp_update_count = 0; in _base_fault_reset_work()
819 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
821 if (ioc->fault_reset_work_q) in _base_fault_reset_work()
822 queue_delayed_work(ioc->fault_reset_work_q, in _base_fault_reset_work()
823 &ioc->fault_reset_work, in _base_fault_reset_work()
825 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in _base_fault_reset_work()
829 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
839 if (ioc->fault_reset_work_q) in mpt3sas_base_start_watchdog()
842 ioc->timestamp_update_count = 0; in mpt3sas_base_start_watchdog()
845 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); in mpt3sas_base_start_watchdog()
846 snprintf(ioc->fault_reset_work_q_name, in mpt3sas_base_start_watchdog()
847 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", in mpt3sas_base_start_watchdog()
848 ioc->driver_name, ioc->id); in mpt3sas_base_start_watchdog()
849 ioc->fault_reset_work_q = in mpt3sas_base_start_watchdog()
850 create_singlethread_workqueue(ioc->fault_reset_work_q_name); in mpt3sas_base_start_watchdog()
851 if (!ioc->fault_reset_work_q) { in mpt3sas_base_start_watchdog()
855 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_start_watchdog()
856 if (ioc->fault_reset_work_q) in mpt3sas_base_start_watchdog()
857 queue_delayed_work(ioc->fault_reset_work_q, in mpt3sas_base_start_watchdog()
858 &ioc->fault_reset_work, in mpt3sas_base_start_watchdog()
860 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_start_watchdog()
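
mpt3sas_base_start_watchdog() above pairs a dedicated single-threaded workqueue with a delayed work item, and _base_fault_reset_work() re-queues itself to keep polling. A minimal, self-contained sketch of that self-rearming pattern (poll_wq, poll_work and POLL_MS are illustrative, not the driver's names):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define POLL_MS 1000

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... inspect controller state, handle FAULT/CoreDump ... */
	queue_delayed_work(poll_wq, &poll_work, msecs_to_jiffies(POLL_MS));
}

static int start_watchdog_example(void)
{
	poll_wq = create_singlethread_workqueue("poll_example");
	if (!poll_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	queue_delayed_work(poll_wq, &poll_work, msecs_to_jiffies(POLL_MS));
	return 0;
}
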
864 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
875 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_stop_watchdog()
876 wq = ioc->fault_reset_work_q; in mpt3sas_base_stop_watchdog()
877 ioc->fault_reset_work_q = NULL; in mpt3sas_base_stop_watchdog()
878 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_stop_watchdog()
880 if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) in mpt3sas_base_stop_watchdog()
887 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
898 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
911 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
916 * Return: 0 for success, non-zero for failure.
922 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? in mpt3sas_base_wait_for_coredump_completion()
923 ioc->manu_pg11.CoreDumpTOSec : in mpt3sas_base_wait_for_coredump_completion()
942 * mpt3sas_halt_firmware - halts mpt controller firmware
955 if (!ioc->fwfault_debug) in mpt3sas_halt_firmware()
960 doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); in mpt3sas_halt_firmware()
969 writel(0xC0FFEE00, &ioc->chip->Doorbell); in mpt3sas_halt_firmware()
973 if (ioc->fwfault_debug == 2) in mpt3sas_halt_firmware()
981 * _base_sas_ioc_info - verbose translation of the ioc status
990 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & in _base_sas_ioc_info()
997 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || in _base_sas_ioc_info()
998 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || in _base_sas_ioc_info()
999 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION) in _base_sas_ioc_info()
1009 if (request_hdr->Function == MPI2_FUNCTION_CONFIG) { in _base_sas_ioc_info()
1012 if ((rqst->ExtPageType == in _base_sas_ioc_info()
1014 !(ioc->logging_level & MPT_DEBUG_CONFIG)) { in _base_sas_ioc_info()
1098 * For use by SCSI Initiator and SCSI Target end-to-end data protection in _base_sas_ioc_info()
1171 switch (request_hdr->Function) { in _base_sas_ioc_info()
1173 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size; in _base_sas_ioc_info()
1197 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; in _base_sas_ioc_info()
1202 ioc->sge_size; in _base_sas_ioc_info()
1218 * _base_display_event_data - verbose translation of firmware async events
1229 if (!(ioc->logging_level & MPT_DEBUG_EVENTS)) in _base_display_event_data()
1232 event = le16_to_cpu(mpi_reply->Event); in _base_display_event_data()
1251 if (!ioc->hide_ir_msg) in _base_display_event_data()
1257 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; in _base_display_event_data()
1259 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? in _base_display_event_data()
1261 if (event_data->DiscoveryStatus) in _base_display_event_data()
1263 le32_to_cpu(event_data->DiscoveryStatus)); in _base_display_event_data()
1283 if (!ioc->hide_ir_msg) in _base_display_event_data()
1287 if (!ioc->hide_ir_msg) in _base_display_event_data()
1291 if (!ioc->hide_ir_msg) in _base_display_event_data()
1295 if (!ioc->hide_ir_msg) in _base_display_event_data()
1313 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData; in _base_display_event_data()
1315 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ? in _base_display_event_data()
1317 if (event_data->EnumerationStatus) in _base_display_event_data()
1319 le32_to_cpu(event_data->EnumerationStatus)); in _base_display_event_data()
1335 * _base_sas_log_info - verbose translation of firmware log info
1363 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == in _base_sas_log_info()
1375 if (!ioc->hide_ir_msg) in _base_sas_log_info()
1388 * _base_display_reply_info - handle reply descriptors depending on IOC Status
1408 ioc_status = le16_to_cpu(mpi_reply->IOCStatus); in _base_display_reply_info()
1411 (ioc->logging_level & MPT_DEBUG_REPLY)) { in _base_display_reply_info()
1417 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); in _base_display_reply_info()
1428 * mpt3sas_base_done - base internal command completion routine
1445 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) in mpt3sas_base_done()
1448 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) in mpt3sas_base_done()
1451 ioc->base_cmds.status |= MPT3_CMD_COMPLETE; in mpt3sas_base_done()
1453 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; in mpt3sas_base_done()
1454 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); in mpt3sas_base_done()
1456 ioc->base_cmds.status &= ~MPT3_CMD_PENDING; in mpt3sas_base_done()
1458 complete(&ioc->base_cmds.done); in mpt3sas_base_done()
1463 * _base_async_event - main callback handler for firmware async events
1483 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) in _base_async_event()
1488 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) in _base_async_event()
1490 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); in _base_async_event()
1496 INIT_LIST_HEAD(&delayed_event_ack->list); in _base_async_event()
1497 delayed_event_ack->Event = mpi_reply->Event; in _base_async_event()
1498 delayed_event_ack->EventContext = mpi_reply->EventContext; in _base_async_event()
1499 list_add_tail(&delayed_event_ack->list, in _base_async_event()
1500 &ioc->delayed_event_ack_list); in _base_async_event()
1503 le16_to_cpu(mpi_reply->Event))); in _base_async_event()
1509 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; in _base_async_event()
1510 ack_request->Event = mpi_reply->Event; in _base_async_event()
1511 ack_request->EventContext = mpi_reply->EventContext; in _base_async_event()
1512 ack_request->VF_ID = 0; /* TODO */ in _base_async_event()
1513 ack_request->VP_ID = 0; in _base_async_event()
1514 ioc->put_smid_default(ioc, smid); in _base_async_event()
1533 WARN_ON(smid >= ioc->hi_priority_smid)) in _get_st_from_smid()
1544 * _base_get_cb_idx - obtain the callback index
1554 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; in _base_get_cb_idx()
1557 if (smid < ioc->hi_priority_smid) { in _base_get_cb_idx()
1563 cb_idx = st->cb_idx; in _base_get_cb_idx()
1565 cb_idx = ioc->ctl_cb_idx; in _base_get_cb_idx()
1566 } else if (smid < ioc->internal_smid) { in _base_get_cb_idx()
1567 i = smid - ioc->hi_priority_smid; in _base_get_cb_idx()
1568 cb_idx = ioc->hpr_lookup[i].cb_idx; in _base_get_cb_idx()
1569 } else if (smid <= ioc->hba_queue_depth) { in _base_get_cb_idx()
1570 i = smid - ioc->internal_smid; in _base_get_cb_idx()
1571 cb_idx = ioc->internal_lookup[i].cb_idx; in _base_get_cb_idx()
1577 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
1591 ioc->reply_queue_count - ioc->iopoll_q_start_index; in mpt3sas_base_pause_mq_polling()
1595 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1); in mpt3sas_base_pause_mq_polling()
1601 while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) { in mpt3sas_base_pause_mq_polling()
1609 * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
1618 ioc->reply_queue_count - ioc->iopoll_q_start_index; in mpt3sas_base_resume_mq_polling()
1622 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0); in mpt3sas_base_resume_mq_polling()
1626 * mpt3sas_base_mask_interrupts - disable interrupts
1636 ioc->mask_interrupts = 1; in mpt3sas_base_mask_interrupts()
1637 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); in mpt3sas_base_mask_interrupts()
1639 writel(him_register, &ioc->chip->HostInterruptMask); in mpt3sas_base_mask_interrupts()
1640 ioc->base_readl(&ioc->chip->HostInterruptMask); in mpt3sas_base_mask_interrupts()
1644 * mpt3sas_base_unmask_interrupts - enable interrupts
1654 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask); in mpt3sas_base_unmask_interrupts()
1656 writel(him_register, &ioc->chip->HostInterruptMask); in mpt3sas_base_unmask_interrupts()
1657 ioc->mask_interrupts = 0; in mpt3sas_base_unmask_interrupts()
1679 * _base_process_reply_queue - Process reply descriptors from reply
1695 u8 msix_index = reply_q->msix_index; in _base_process_reply_queue()
1696 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; in _base_process_reply_queue()
1701 if (!atomic_add_unless(&reply_q->busy, 1, 1)) in _base_process_reply_queue()
1704 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; in _base_process_reply_queue()
1705 request_descript_type = rpf->Default.ReplyFlags in _base_process_reply_queue()
1708 atomic_dec(&reply_q->busy); in _base_process_reply_queue()
1714 rd.word = le64_to_cpu(rpf->Words); in _base_process_reply_queue()
1718 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); in _base_process_reply_queue()
1736 rpf->AddressReply.ReplyFrameAddress); in _base_process_reply_queue()
1737 if (reply > ioc->reply_dma_max_address || in _base_process_reply_queue()
1738 reply < ioc->reply_dma_min_address) in _base_process_reply_queue()
1759 ioc->reply_free_host_index = in _base_process_reply_queue()
1760 (ioc->reply_free_host_index == in _base_process_reply_queue()
1761 (ioc->reply_free_queue_depth - 1)) ? in _base_process_reply_queue()
1762 0 : ioc->reply_free_host_index + 1; in _base_process_reply_queue()
1763 ioc->reply_free[ioc->reply_free_host_index] = in _base_process_reply_queue()
1765 if (ioc->is_mcpu_endpoint) in _base_process_reply_queue()
1768 ioc->reply_free_host_index); in _base_process_reply_queue()
1769 writel(ioc->reply_free_host_index, in _base_process_reply_queue()
1770 &ioc->chip->ReplyFreeHostIndex); in _base_process_reply_queue()
1774 rpf->Words = cpu_to_le64(ULLONG_MAX); in _base_process_reply_queue()
1775 reply_q->reply_post_host_index = in _base_process_reply_queue()
1776 (reply_q->reply_post_host_index == in _base_process_reply_queue()
1777 (ioc->reply_post_queue_depth - 1)) ? 0 : in _base_process_reply_queue()
1778 reply_q->reply_post_host_index + 1; in _base_process_reply_queue()
1780 reply_q->reply_post_free[reply_q->reply_post_host_index]. in _base_process_reply_queue()
1788 if (completed_cmds >= ioc->thresh_hold) { in _base_process_reply_queue()
1789 if (ioc->combined_reply_queue) { in _base_process_reply_queue()
1790 writel(reply_q->reply_post_host_index | in _base_process_reply_queue()
1793 ioc->replyPostRegisterIndex[msix_index/8]); in _base_process_reply_queue()
1795 writel(reply_q->reply_post_host_index | in _base_process_reply_queue()
1798 &ioc->chip->ReplyPostHostIndex); in _base_process_reply_queue()
1800 if (!reply_q->is_iouring_poll_q && in _base_process_reply_queue()
1801 !reply_q->irq_poll_scheduled) { in _base_process_reply_queue()
1802 reply_q->irq_poll_scheduled = true; in _base_process_reply_queue()
1803 irq_poll_sched(&reply_q->irqpoll); in _base_process_reply_queue()
1805 atomic_dec(&reply_q->busy); in _base_process_reply_queue()
1810 if (!reply_q->reply_post_host_index) in _base_process_reply_queue()
1811 rpf = reply_q->reply_post_free; in _base_process_reply_queue()
1819 atomic_dec(&reply_q->busy); in _base_process_reply_queue()
1823 if (ioc->is_warpdrive) { in _base_process_reply_queue()
1824 writel(reply_q->reply_post_host_index, in _base_process_reply_queue()
1825 ioc->reply_post_host_index[msix_index]); in _base_process_reply_queue()
1826 atomic_dec(&reply_q->busy); in _base_process_reply_queue()
1839 * Host Index Register supports 8 MSI-X vectors. in _base_process_reply_queue()
1845 if (ioc->combined_reply_queue) in _base_process_reply_queue()
1846 writel(reply_q->reply_post_host_index | ((msix_index & 7) << in _base_process_reply_queue()
1848 ioc->replyPostRegisterIndex[msix_index/8]); in _base_process_reply_queue()
1850 writel(reply_q->reply_post_host_index | (msix_index << in _base_process_reply_queue()
1852 &ioc->chip->ReplyPostHostIndex); in _base_process_reply_queue()
1853 atomic_dec(&reply_q->busy); in _base_process_reply_queue()
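
Both the reply free host index and the reply post host index above advance with the same wrap-around rule: increment, and fall back to 0 once the queue depth is reached. A trivial sketch of that update (illustrative helper, not driver code):

#include <linux/types.h>

static inline u32 advance_ring_index_example(u32 idx, u32 depth)
{
	return (idx == depth - 1) ? 0 : idx + 1;	/* wrap to 0 at queue depth */
}
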
1858 * mpt3sas_blk_mq_poll - poll the blk mq poll queue
1867 (struct MPT3SAS_ADAPTER *)shost->hostdata; in mpt3sas_blk_mq_poll()
1870 int qid = queue_num - ioc->iopoll_q_start_index; in mpt3sas_blk_mq_poll()
1872 if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) || in mpt3sas_blk_mq_poll()
1873 !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) in mpt3sas_blk_mq_poll()
1876 reply_q = ioc->io_uring_poll_queues[qid].reply_q; in mpt3sas_blk_mq_poll()
1879 atomic_dec(&ioc->io_uring_poll_queues[qid].busy); in mpt3sas_blk_mq_poll()
1885 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1895 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; in _base_interrupt()
1897 if (ioc->mask_interrupts) in _base_interrupt()
1899 if (reply_q->irq_poll_scheduled) in _base_interrupt()
1906 * _base_irqpoll - IRQ poll callback handler
1920 if (reply_q->irq_line_enable) { in _base_irqpoll()
1921 disable_irq_nosync(reply_q->os_irq); in _base_irqpoll()
1922 reply_q->irq_line_enable = false; in _base_irqpoll()
1927 reply_q->irq_poll_scheduled = false; in _base_irqpoll()
1928 reply_q->irq_line_enable = true; in _base_irqpoll()
1929 enable_irq(reply_q->os_irq); in _base_irqpoll()
1943 * _base_init_irqpolls - initialize IRQ polls
1953 if (list_empty(&ioc->reply_queue_list)) in _base_init_irqpolls()
1956 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { in _base_init_irqpolls()
1957 if (reply_q->is_iouring_poll_q) in _base_init_irqpolls()
1959 irq_poll_init(&reply_q->irqpoll, in _base_init_irqpolls()
1960 ioc->hba_queue_depth/4, _base_irqpoll); in _base_init_irqpolls()
1961 reply_q->irq_poll_scheduled = false; in _base_init_irqpolls()
1962 reply_q->irq_line_enable = true; in _base_init_irqpolls()
1963 reply_q->os_irq = pci_irq_vector(ioc->pdev, in _base_init_irqpolls()
1964 reply_q->msix_index); in _base_init_irqpolls()
1969 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1977 return (ioc->facts.IOCCapabilities & in _base_is_controller_msix_enabled()
1978 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; in _base_is_controller_msix_enabled()
1982 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1985 * timed-out SCSI command got delayed
1986 * Context: non-ISR context
1996 * then multi-queues are not enabled in mpt3sas_base_sync_reply_irqs()
2001 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in mpt3sas_base_sync_reply_irqs()
2002 if (ioc->shost_recovery || ioc->remove_host || in mpt3sas_base_sync_reply_irqs()
2003 ioc->pci_error_recovery) in mpt3sas_base_sync_reply_irqs()
2006 if (reply_q->msix_index == 0) in mpt3sas_base_sync_reply_irqs()
2009 if (reply_q->is_iouring_poll_q) { in mpt3sas_base_sync_reply_irqs()
2014 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); in mpt3sas_base_sync_reply_irqs()
2015 if (reply_q->irq_poll_scheduled) { in mpt3sas_base_sync_reply_irqs()
2019 irq_poll_disable(&reply_q->irqpoll); in mpt3sas_base_sync_reply_irqs()
2020 irq_poll_enable(&reply_q->irqpoll); in mpt3sas_base_sync_reply_irqs()
2024 if (reply_q->irq_poll_scheduled) { in mpt3sas_base_sync_reply_irqs()
2025 reply_q->irq_poll_scheduled = false; in mpt3sas_base_sync_reply_irqs()
2026 reply_q->irq_line_enable = true; in mpt3sas_base_sync_reply_irqs()
2027 enable_irq(reply_q->os_irq); in mpt3sas_base_sync_reply_irqs()
2037 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
2047 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
2057 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) in mpt3sas_base_register_callback_handler()
2066 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
2079 * _base_build_zero_len_sge - build zero length sg entry
2094 ioc->base_add_sg_single(paddr, flags_length, -1); in _base_build_zero_len_sge()
2098 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
2110 sgel->FlagsLength = cpu_to_le32(flags_length); in _base_add_sg_single_32()
2111 sgel->Address = cpu_to_le32(dma_addr); in _base_add_sg_single_32()
2116 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
2128 sgel->FlagsLength = cpu_to_le32(flags_length); in _base_add_sg_single_64()
2129 sgel->Address = cpu_to_le64(dma_addr); in _base_add_sg_single_64()
2133 * _base_get_chain_buffer_tracker - obtain chain tracker
2146 u16 smid = st->smid; in _base_get_chain_buffer_tracker()
2148 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset); in _base_get_chain_buffer_tracker()
2150 if (chain_offset == ioc->chains_needed_per_io) in _base_get_chain_buffer_tracker()
2153 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset]; in _base_get_chain_buffer_tracker()
2154 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset); in _base_get_chain_buffer_tracker()
2160 * _base_build_sg - build generic sg
2185 ioc->base_add_sg_single(psge, sgl_flags | in _base_build_sg()
2189 psge += ioc->sge_size; in _base_build_sg()
2196 ioc->base_add_sg_single(psge, sgl_flags | in _base_build_sg()
2203 ioc->base_add_sg_single(psge, sgl_flags | in _base_build_sg()
2210 ioc->base_add_sg_single(psge, sgl_flags | in _base_build_sg()
2218 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2242 * non-contiguous SGL into a PRP in this case. All PRPs will describe
2254 * Each 64-bit PRP entry comprises an address and an offset field. The address
2257 * first element in a PRP list may contain a non-zero offset, implying that all
2262 * by the list begins at a non-zero offset within the first 4KB page, then the
2263 * first PRP element will contain a non-zero offset indicating where the region
2286 (void *)nvme_encap_request->NVMe_Command; in _base_build_nvme_prp()
2294 prp1_entry = &nvme_cmd->prp1; in _base_build_nvme_prp()
2295 prp2_entry = &nvme_cmd->prp2; in _base_build_nvme_prp()
2308 page_mask = ioc->page_size - 1; in _base_build_nvme_prp()
2335 * page boundary - prp_size (8 bytes). in _base_build_nvme_prp()
2342 * - bump the current memory pointer to the next in _base_build_nvme_prp()
2344 * - set the PRP Entry to point to that page. This in _base_build_nvme_prp()
2346 * - bump the PRP Entry pointer the start of the in _base_build_nvme_prp()
2348 * contiguous, no need to get a new page - it's in _base_build_nvme_prp()
2358 entry_len = ioc->page_size - offset; in _base_build_nvme_prp()
2378 if (length > ioc->page_size) { in _base_build_nvme_prp()
2422 length -= entry_len; in _base_build_nvme_prp()
2427 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
2431 * @scmd: SCSI command from the mid-layer
2455 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE); in base_make_prp_nvme()
2468 page_mask = nvme_pg_size - 1; in base_make_prp_nvme()
2480 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; in base_make_prp_nvme()
2497 main_chain_element->Address = cpu_to_le64(msg_dma); in base_make_prp_nvme()
2498 main_chain_element->NextChainOffset = 0; in base_make_prp_nvme()
2499 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in base_make_prp_nvme()
2504 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL; in base_make_prp_nvme()
2510 first_prp_len = nvme_pg_size - offset; in base_make_prp_nvme()
2512 ptr_first_sgl->Address = cpu_to_le64(sge_addr); in base_make_prp_nvme()
2513 ptr_first_sgl->Length = cpu_to_le32(first_prp_len); in base_make_prp_nvme()
2515 data_len -= first_prp_len; in base_make_prp_nvme()
2519 sge_len -= first_prp_len; in base_make_prp_nvme()
2547 sge_len -= nvme_pg_size; in base_make_prp_nvme()
2548 data_len -= nvme_pg_size; in base_make_prp_nvme()
2561 main_chain_element->Length = in base_make_prp_nvme()
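
Per the PRP rules described above, only the first entry may point into the middle of a page (a non-zero offset); every later entry is page-aligned, so the first segment covers page_size - offset bytes and the rest is carved into whole pages. A hedged sketch, not driver code, of counting the entries a buffer needs under those rules, assuming a 4 KB PRP page:

#include <linux/kernel.h>
#include <linux/types.h>

#define PRP_PAGE_SIZE_EXAMPLE	4096

static u32 count_prp_entries_example(u64 addr, u32 len)
{
	u32 first = PRP_PAGE_SIZE_EXAMPLE - (addr & (PRP_PAGE_SIZE_EXAMPLE - 1));

	if (len <= first)
		return 1;	/* fits in the first (possibly offset) page */
	return 1 + DIV_ROUND_UP(len - first, PRP_PAGE_SIZE_EXAMPLE);
}
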
2575 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { in base_is_prp_possible()
2590 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2636 * _base_add_sg_single_ieee - add sg element for IEEE format
2649 sgel->Flags = flags; in _base_add_sg_single_ieee()
2650 sgel->NextChainOffset = chain_offset; in _base_add_sg_single_ieee()
2651 sgel->Length = cpu_to_le32(length); in _base_add_sg_single_ieee()
2652 sgel->Address = cpu_to_le64(dma_addr); in _base_add_sg_single_ieee()
2656 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2671 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); in _base_build_zero_len_sge_ieee()
2675 * _base_build_sg_scmd - main sg creation routine
2710 if (scmd->sc_data_direction == DMA_TO_DEVICE) in _base_build_sg_scmd()
2722 return -ENOMEM; in _base_build_sg_scmd()
2724 sg_local = &mpi_request->SGL; in _base_build_sg_scmd()
2725 sges_in_segment = ioc->max_sges_in_main_message; in _base_build_sg_scmd()
2729 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + in _base_build_sg_scmd()
2730 (sges_in_segment * ioc->sge_size))/4; in _base_build_sg_scmd()
2735 ioc->base_add_sg_single(sg_local, in _base_build_sg_scmd()
2739 ioc->base_add_sg_single(sg_local, sgl_flags | in _base_build_sg_scmd()
2742 sg_local += ioc->sge_size; in _base_build_sg_scmd()
2743 sges_left--; in _base_build_sg_scmd()
2744 sges_in_segment--; in _base_build_sg_scmd()
2751 return -1; in _base_build_sg_scmd()
2752 chain = chain_req->chain_buffer; in _base_build_sg_scmd()
2753 chain_dma = chain_req->chain_buffer_dma; in _base_build_sg_scmd()
2756 ioc->max_sges_in_chain_message) ? sges_left : in _base_build_sg_scmd()
2757 ioc->max_sges_in_chain_message; in _base_build_sg_scmd()
2759 0 : (sges_in_segment * ioc->sge_size)/4; in _base_build_sg_scmd()
2760 chain_length = sges_in_segment * ioc->sge_size; in _base_build_sg_scmd()
2764 chain_length += ioc->sge_size; in _base_build_sg_scmd()
2766 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | in _base_build_sg_scmd()
2775 ioc->base_add_sg_single(sg_local, in _base_build_sg_scmd()
2780 ioc->base_add_sg_single(sg_local, sgl_flags | in _base_build_sg_scmd()
2784 sg_local += ioc->sge_size; in _base_build_sg_scmd()
2785 sges_left--; in _base_build_sg_scmd()
2786 sges_in_segment--; in _base_build_sg_scmd()
2791 return -1; in _base_build_sg_scmd()
2792 chain = chain_req->chain_buffer; in _base_build_sg_scmd()
2793 chain_dma = chain_req->chain_buffer_dma; in _base_build_sg_scmd()
2802 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | in _base_build_sg_scmd()
2805 ioc->base_add_sg_single(sg_local, sgl_flags | in _base_build_sg_scmd()
2808 sg_local += ioc->sge_size; in _base_build_sg_scmd()
2809 sges_left--; in _base_build_sg_scmd()
2816 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2866 return -ENOMEM; in _base_build_sg_scmd_ieee()
2868 sg_local = &mpi_request->SGL; in _base_build_sg_scmd_ieee()
2869 sges_in_segment = (ioc->request_sz - in _base_build_sg_scmd_ieee()
2870 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2874 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + in _base_build_sg_scmd_ieee()
2875 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee); in _base_build_sg_scmd_ieee()
2882 sg_local += ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2883 sges_left--; in _base_build_sg_scmd_ieee()
2884 sges_in_segment--; in _base_build_sg_scmd_ieee()
2890 return -1; in _base_build_sg_scmd_ieee()
2891 chain = chain_req->chain_buffer; in _base_build_sg_scmd_ieee()
2892 chain_dma = chain_req->chain_buffer_dma; in _base_build_sg_scmd_ieee()
2895 ioc->max_sges_in_chain_message) ? sges_left : in _base_build_sg_scmd_ieee()
2896 ioc->max_sges_in_chain_message; in _base_build_sg_scmd_ieee()
2899 chain_length = sges_in_segment * ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2901 chain_length += ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2914 sg_local += ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2915 sges_left--; in _base_build_sg_scmd_ieee()
2916 sges_in_segment--; in _base_build_sg_scmd_ieee()
2921 return -1; in _base_build_sg_scmd_ieee()
2922 chain = chain_req->chain_buffer; in _base_build_sg_scmd_ieee()
2923 chain_dma = chain_req->chain_buffer_dma; in _base_build_sg_scmd_ieee()
2939 sg_local += ioc->sge_size_ieee; in _base_build_sg_scmd_ieee()
2940 sges_left--; in _base_build_sg_scmd_ieee()
2947 * _base_build_sg_ieee - build generic sg for IEEE format
2975 psge += ioc->sge_size_ieee; in _base_build_sg_ieee()
2996 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2999 * _base_config_dma_addressing - set dma addressing
3003 * Return: 0 for success, non-zero for failure.
3011 if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) { in _base_config_dma_addressing()
3012 ioc->dma_mask = 32; in _base_config_dma_addressing()
3015 } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) { in _base_config_dma_addressing()
3016 ioc->dma_mask = 63; in _base_config_dma_addressing()
3019 ioc->dma_mask = 64; in _base_config_dma_addressing()
3023 if (ioc->use_32bit_dma) in _base_config_dma_addressing()
3026 if (dma_set_mask(&pdev->dev, dma_mask) || in _base_config_dma_addressing()
3027 dma_set_coherent_mask(&pdev->dev, coherent_dma_mask)) in _base_config_dma_addressing()
3028 return -ENODEV; in _base_config_dma_addressing()
3030 if (ioc->dma_mask > 32) { in _base_config_dma_addressing()
3031 ioc->base_add_sg_single = &_base_add_sg_single_64; in _base_config_dma_addressing()
3032 ioc->sge_size = sizeof(Mpi2SGESimple64_t); in _base_config_dma_addressing()
3034 ioc->base_add_sg_single = &_base_add_sg_single_32; in _base_config_dma_addressing()
3035 ioc->sge_size = sizeof(Mpi2SGESimple32_t); in _base_config_dma_addressing()
3040 ioc->dma_mask, convert_to_kb(s.totalram)); in _base_config_dma_addressing()
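
The selection above issues dma_set_mask() and dma_set_coherent_mask() separately with the same mask; a minimal equivalent sketch using the combined helper, with the bit width (32, 63 or 64) chosen the same way the logic above chooses ioc->dma_mask:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_mask_example(struct pci_dev *pdev, int bits)
{
	/* bits is 32, 63 or 64, mirroring ioc->dma_mask above */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits)) ?
		-ENODEV : 0;
}
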
3046 * _base_check_enable_msix - checks MSI-X capability.
3059 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX in _base_check_enable_msix()
3061 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && in _base_check_enable_msix()
3062 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { in _base_check_enable_msix()
3063 return -EINVAL; in _base_check_enable_msix()
3066 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); in _base_check_enable_msix()
3069 return -EINVAL; in _base_check_enable_msix()
3074 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || in _base_check_enable_msix()
3075 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || in _base_check_enable_msix()
3076 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || in _base_check_enable_msix()
3077 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || in _base_check_enable_msix()
3078 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || in _base_check_enable_msix()
3079 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || in _base_check_enable_msix()
3080 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) in _base_check_enable_msix()
3081 ioc->msix_vector_count = 1; in _base_check_enable_msix()
3083 pci_read_config_word(ioc->pdev, base + 2, &message_control); in _base_check_enable_msix()
3084 ioc->msix_vector_count = (message_control & 0x3FF) + 1; in _base_check_enable_msix()
3087 ioc->msix_vector_count)); in _base_check_enable_msix()
3092 * mpt3sas_base_free_irq - free irq
3103 if (list_empty(&ioc->reply_queue_list)) in mpt3sas_base_free_irq()
3106 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { in mpt3sas_base_free_irq()
3107 list_del(&reply_q->list); in mpt3sas_base_free_irq()
3108 if (reply_q->is_iouring_poll_q) { in mpt3sas_base_free_irq()
3113 if (ioc->smp_affinity_enable) { in mpt3sas_base_free_irq()
3114 irq = pci_irq_vector(ioc->pdev, reply_q->msix_index); in mpt3sas_base_free_irq()
3117 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index), in mpt3sas_base_free_irq()
3124 * _base_request_irq - request irq
3133 struct pci_dev *pdev = ioc->pdev; in _base_request_irq()
3141 return -ENOMEM; in _base_request_irq()
3143 reply_q->ioc = ioc; in _base_request_irq()
3144 reply_q->msix_index = index; in _base_request_irq()
3146 atomic_set(&reply_q->busy, 0); in _base_request_irq()
3148 if (index >= ioc->iopoll_q_start_index) { in _base_request_irq()
3149 qid = index - ioc->iopoll_q_start_index; in _base_request_irq()
3150 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", in _base_request_irq()
3151 ioc->driver_name, ioc->id, qid); in _base_request_irq()
3152 reply_q->is_iouring_poll_q = 1; in _base_request_irq()
3153 ioc->io_uring_poll_queues[qid].reply_q = reply_q; in _base_request_irq()
3158 if (ioc->msix_enable) in _base_request_irq()
3159 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", in _base_request_irq()
3160 ioc->driver_name, ioc->id, index); in _base_request_irq()
3162 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", in _base_request_irq()
3163 ioc->driver_name, ioc->id); in _base_request_irq()
3165 IRQF_SHARED, reply_q->name, reply_q); in _base_request_irq()
3168 reply_q->name, pci_irq_vector(pdev, index)); in _base_request_irq()
3170 return -EBUSY; in _base_request_irq()
3173 INIT_LIST_HEAD(&reply_q->list); in _base_request_irq()
3174 list_add_tail(&reply_q->list, &ioc->reply_queue_list); in _base_request_irq()
3179 * _base_assign_reply_queues - assigning msix index for each cpu
3189 int iopoll_q_count = ioc->reply_queue_count - in _base_assign_reply_queues()
3190 ioc->iopoll_q_start_index; in _base_assign_reply_queues()
3196 if (ioc->msix_load_balance) in _base_assign_reply_queues()
3199 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); in _base_assign_reply_queues()
3202 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, in _base_assign_reply_queues()
3203 ioc->facts.MaxMSIxVectors); in _base_assign_reply_queues()
3207 if (ioc->smp_affinity_enable) { in _base_assign_reply_queues()
3213 if (ioc->high_iops_queues) { in _base_assign_reply_queues()
3214 mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev)); in _base_assign_reply_queues()
3215 for (index = 0; index < ioc->high_iops_queues; in _base_assign_reply_queues()
3217 irq = pci_irq_vector(ioc->pdev, index); in _base_assign_reply_queues()
3222 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in _base_assign_reply_queues()
3225 if (reply_q->msix_index < ioc->high_iops_queues || in _base_assign_reply_queues()
3226 reply_q->msix_index >= ioc->iopoll_q_start_index) in _base_assign_reply_queues()
3229 mask = pci_irq_get_affinity(ioc->pdev, in _base_assign_reply_queues()
3230 reply_q->msix_index); in _base_assign_reply_queues()
3233 reply_q->msix_index); in _base_assign_reply_queues()
3238 if (cpu >= ioc->cpu_msix_table_sz) in _base_assign_reply_queues()
3240 ioc->cpu_msix_table[cpu] = reply_q->msix_index; in _base_assign_reply_queues()
3248 nr_msix -= (ioc->high_iops_queues - iopoll_q_count); in _base_assign_reply_queues()
3251 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in _base_assign_reply_queues()
3254 if (reply_q->msix_index < ioc->high_iops_queues || in _base_assign_reply_queues()
3255 reply_q->msix_index >= ioc->iopoll_q_start_index) in _base_assign_reply_queues()
3265 ioc->cpu_msix_table[cpu] = reply_q->msix_index; in _base_assign_reply_queues()
3273 * _base_check_and_enable_high_iops_queues - enable high iops mode
3278 * - HBA is a SEA/AERO controller and
3279 * - the HBA supports 128 MSI-X vectors and
3280 * - total CPU count in the system >=16 and
3281 * - loaded driver with default max_msix_vectors module parameter and
3282 * - system booted in non kdump mode
3297 ioc->io_uring_poll_queues) { in _base_check_and_enable_high_iops_queues()
3298 ioc->high_iops_queues = 0; in _base_check_and_enable_high_iops_queues()
3304 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta); in _base_check_and_enable_high_iops_queues()
3308 ioc->high_iops_queues = 0; in _base_check_and_enable_high_iops_queues()
3313 if (!reset_devices && ioc->is_aero_ioc && in _base_check_and_enable_high_iops_queues()
3316 max_msix_vectors == -1) in _base_check_and_enable_high_iops_queues()
3317 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES; in _base_check_and_enable_high_iops_queues()
3319 ioc->high_iops_queues = 0; in _base_check_and_enable_high_iops_queues()
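
The bullet conditions in the comment above reduce to a single predicate checked by this function; a hedged sketch with illustrative parameters standing in for the driver's internal state:

#include <linux/types.h>

static bool want_high_iops_example(bool is_aero_hba, int hba_msix_vectors,
				   int online_cpus, int max_msix_vectors_param,
				   bool kdump_boot)
{
	return is_aero_hba && hba_msix_vectors == 128 && online_cpus >= 16 &&
	       max_msix_vectors_param == -1 && !kdump_boot;
}
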
3323 * mpt3sas_base_disable_msix - disables msix
3330 if (!ioc->msix_enable) in mpt3sas_base_disable_msix()
3332 pci_free_irq_vectors(ioc->pdev); in mpt3sas_base_disable_msix()
3333 ioc->msix_enable = 0; in mpt3sas_base_disable_msix()
3334 kfree(ioc->io_uring_poll_queues); in mpt3sas_base_disable_msix()
3338 * _base_alloc_irq_vectors - allocate msix vectors
3346 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; in _base_alloc_irq_vectors()
3352 int nr_msix_vectors = ioc->iopoll_q_start_index; in _base_alloc_irq_vectors()
3355 if (ioc->smp_affinity_enable) in _base_alloc_irq_vectors()
3360 ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, in _base_alloc_irq_vectors()
3361 ioc->reply_queue_count, nr_msix_vectors); in _base_alloc_irq_vectors()
3363 i = pci_alloc_irq_vectors_affinity(ioc->pdev, in _base_alloc_irq_vectors()
3364 ioc->high_iops_queues, in _base_alloc_irq_vectors()
3371 * _base_enable_msix - enables msix, failback to io_apic
3383 ioc->msix_load_balance = false; in _base_enable_msix()
3385 if (msix_disable == -1 || msix_disable == 0) in _base_enable_msix()
3394 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); in _base_enable_msix()
3396 ioc->cpu_count, max_msix_vectors); in _base_enable_msix()
3398 ioc->reply_queue_count = in _base_enable_msix()
3399 min_t(int, ioc->cpu_count, ioc->msix_vector_count); in _base_enable_msix()
3401 if (!ioc->rdpq_array_enable && max_msix_vectors == -1) in _base_enable_msix()
3413 if (!ioc->combined_reply_queue && in _base_enable_msix()
3414 ioc->hba_mpi_version_belonged != MPI2_VERSION) { in _base_enable_msix()
3417 ioc->msix_load_balance = true; in _base_enable_msix()
3424 if (ioc->msix_load_balance) in _base_enable_msix()
3425 ioc->smp_affinity_enable = 0; in _base_enable_msix()
3427 if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) in _base_enable_msix()
3428 ioc->shost->host_tagset = 0; in _base_enable_msix()
3433 if (ioc->shost->host_tagset) in _base_enable_msix()
3437 ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, in _base_enable_msix()
3439 if (!ioc->io_uring_poll_queues) in _base_enable_msix()
3443 if (ioc->is_aero_ioc) in _base_enable_msix()
3445 ioc->msix_vector_count); in _base_enable_msix()
3451 ioc->reply_queue_count = min_t(int, in _base_enable_msix()
3452 ioc->reply_queue_count + ioc->high_iops_queues, in _base_enable_msix()
3453 ioc->msix_vector_count); in _base_enable_msix()
3460 ioc->reply_queue_count = min_t(int, local_max_msix_vectors, in _base_enable_msix()
3461 ioc->reply_queue_count); in _base_enable_msix()
3467 if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) in _base_enable_msix()
3469 ioc->reply_queue_count = min_t(int, in _base_enable_msix()
3470 ioc->reply_queue_count + iopoll_q_count, in _base_enable_msix()
3471 ioc->msix_vector_count); in _base_enable_msix()
3477 ioc->iopoll_q_start_index = in _base_enable_msix()
3478 ioc->reply_queue_count - iopoll_q_count; in _base_enable_msix()
3491 if (r < ioc->iopoll_q_start_index) { in _base_enable_msix()
3492 ioc->reply_queue_count = r + iopoll_q_count; in _base_enable_msix()
3493 ioc->iopoll_q_start_index = in _base_enable_msix()
3494 ioc->reply_queue_count - iopoll_q_count; in _base_enable_msix()
3497 ioc->msix_enable = 1; in _base_enable_msix()
3498 for (i = 0; i < ioc->reply_queue_count; i++) { in _base_enable_msix()
3508 ioc->high_iops_queues ? "enabled" : "disabled"); in _base_enable_msix()
3514 ioc->high_iops_queues = 0; in _base_enable_msix()
3516 ioc->reply_queue_count = 1; in _base_enable_msix()
3517 ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; in _base_enable_msix()
3518 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); in _base_enable_msix()
3530 * mpt3sas_base_unmap_resources - free controller resources
3536 struct pci_dev *pdev = ioc->pdev; in mpt3sas_base_unmap_resources()
3543 kfree(ioc->replyPostRegisterIndex); in mpt3sas_base_unmap_resources()
3544 ioc->replyPostRegisterIndex = NULL; in mpt3sas_base_unmap_resources()
3547 if (ioc->chip_phys) { in mpt3sas_base_unmap_resources()
3548 iounmap(ioc->chip); in mpt3sas_base_unmap_resources()
3549 ioc->chip_phys = 0; in mpt3sas_base_unmap_resources()
3553 pci_release_selected_regions(ioc->pdev, ioc->bars); in mpt3sas_base_unmap_resources()
3562 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
3566 * Return: 0 for success, non-zero for failure.
3572 int rc = -EFAULT; in mpt3sas_base_check_for_fault_and_issue_reset()
3575 if (ioc->pci_error_recovery) in mpt3sas_base_check_for_fault_and_issue_reset()
3598 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3601 * Return: 0 for success, non-zero for failure.
3606 struct pci_dev *pdev = ioc->pdev; in mpt3sas_base_map_resources()
3617 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); in mpt3sas_base_map_resources()
3620 ioc->bars = 0; in mpt3sas_base_map_resources()
3621 return -ENODEV; in mpt3sas_base_map_resources()
3625 if (pci_request_selected_regions(pdev, ioc->bars, in mpt3sas_base_map_resources()
3626 ioc->driver_name)) { in mpt3sas_base_map_resources()
3628 ioc->bars = 0; in mpt3sas_base_map_resources()
3629 r = -ENODEV; in mpt3sas_base_map_resources()
3638 r = -ENODEV; in mpt3sas_base_map_resources()
3652 ioc->chip_phys = pci_resource_start(pdev, i); in mpt3sas_base_map_resources()
3653 chip_phys = ioc->chip_phys; in mpt3sas_base_map_resources()
3655 ioc->chip = ioremap(ioc->chip_phys, memap_sz); in mpt3sas_base_map_resources()
3659 if (ioc->chip == NULL) { in mpt3sas_base_map_resources()
3662 r = -EINVAL; in mpt3sas_base_map_resources()
3675 if (!ioc->rdpq_array_enable_assigned) { in mpt3sas_base_map_resources()
3676 ioc->rdpq_array_enable = ioc->rdpq_array_capable; in mpt3sas_base_map_resources()
3677 ioc->rdpq_array_enable_assigned = 1; in mpt3sas_base_map_resources()
3684 iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; in mpt3sas_base_map_resources()
3686 atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); in mpt3sas_base_map_resources()
3687 atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); in mpt3sas_base_map_resources()
3690 if (!ioc->is_driver_loading) in mpt3sas_base_map_resources()
3695 if (ioc->combined_reply_queue) { in mpt3sas_base_map_resources()
3702 ioc->replyPostRegisterIndex = kcalloc( in mpt3sas_base_map_resources()
3703 ioc->combined_reply_index_count, in mpt3sas_base_map_resources()
3705 if (!ioc->replyPostRegisterIndex) { in mpt3sas_base_map_resources()
3708 r = -ENOMEM; in mpt3sas_base_map_resources()
3712 for (i = 0; i < ioc->combined_reply_index_count; i++) { in mpt3sas_base_map_resources()
3713 ioc->replyPostRegisterIndex[i] = in mpt3sas_base_map_resources()
3715 ((u8 __force *)&ioc->chip->Doorbell + in mpt3sas_base_map_resources()
3721 if (ioc->is_warpdrive) { in mpt3sas_base_map_resources()
3722 ioc->reply_post_host_index[0] = (resource_size_t __iomem *) in mpt3sas_base_map_resources()
3723 &ioc->chip->ReplyPostHostIndex; in mpt3sas_base_map_resources()
3725 for (i = 1; i < ioc->cpu_msix_table_sz; i++) in mpt3sas_base_map_resources()
3726 ioc->reply_post_host_index[i] = in mpt3sas_base_map_resources()
3728 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) in mpt3sas_base_map_resources()
3732 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in mpt3sas_base_map_resources()
3733 if (reply_q->msix_index >= ioc->iopoll_q_start_index) { in mpt3sas_base_map_resources()
3735 reply_q->name, reply_q->msix_index); in mpt3sas_base_map_resources()
3740 reply_q->name, in mpt3sas_base_map_resources()
3741 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", in mpt3sas_base_map_resources()
3742 pci_irq_vector(ioc->pdev, reply_q->msix_index)); in mpt3sas_base_map_resources()
3746 &chip_phys, ioc->chip, memap_sz); in mpt3sas_base_map_resources()
3760 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3769 return (void *)(ioc->request + (smid * ioc->request_sz)); in mpt3sas_base_get_msg_frame()
3773 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3782 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); in mpt3sas_base_get_sense_buffer()
3786 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3795 return cpu_to_le32(ioc->sense_dma + ((smid - 1) * in mpt3sas_base_get_sense_buffer_dma()
3800 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3809 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl); in mpt3sas_base_get_pcie_sgl()
3813 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3822 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma; in mpt3sas_base_get_pcie_sgl_dma()
3826 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3837 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); in mpt3sas_base_get_reply_virt_addr()
3841 * _base_get_msix_index - get the msix index
3854 if (ioc->msix_load_balance) in _base_get_msix_index()
3855 return ioc->reply_queue_count ? in _base_get_msix_index()
3857 &ioc->total_io_cnt), ioc->reply_queue_count) : 0; in _base_get_msix_index()
3859 if (scmd && ioc->shost->nr_hw_queues > 1) { in _base_get_msix_index()
3863 ioc->high_iops_queues; in _base_get_msix_index()
3866 return ioc->cpu_msix_table[raw_smp_processor_id()]; in _base_get_msix_index()
3870 * _base_get_high_iops_msix_index - get the msix index of
3889 if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) in _base_get_high_iops_msix_index()
3891 atomic64_add_return(1, &ioc->high_iops_outstanding) / in _base_get_high_iops_msix_index()
3899 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3912 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid()
3913 if (list_empty(&ioc->internal_free_list)) { in mpt3sas_base_get_smid()
3914 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid()
3919 request = list_entry(ioc->internal_free_list.next, in mpt3sas_base_get_smid()
3921 request->cb_idx = cb_idx; in mpt3sas_base_get_smid()
3922 smid = request->smid; in mpt3sas_base_get_smid()
3923 list_del(&request->tracker_list); in mpt3sas_base_get_smid()
3924 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid()
3929 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3954 * tag = smid - 1; in mpt3sas_base_get_smid_scsiio()
3955 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag; in mpt3sas_base_get_smid_scsiio()
3957 ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); in mpt3sas_base_get_smid_scsiio()
3960 request->cb_idx = cb_idx; in mpt3sas_base_get_smid_scsiio()
3961 request->smid = smid; in mpt3sas_base_get_smid_scsiio()
3962 request->scmd = scmd; in mpt3sas_base_get_smid_scsiio()
3963 INIT_LIST_HEAD(&request->chain_list); in mpt3sas_base_get_smid_scsiio()
3968 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3981 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid_hpr()
3982 if (list_empty(&ioc->hpr_free_list)) { in mpt3sas_base_get_smid_hpr()
3983 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid_hpr()
3987 request = list_entry(ioc->hpr_free_list.next, in mpt3sas_base_get_smid_hpr()
3989 request->cb_idx = cb_idx; in mpt3sas_base_get_smid_hpr()
3990 smid = request->smid; in mpt3sas_base_get_smid_hpr()
3991 list_del(&request->tracker_list); in mpt3sas_base_get_smid_hpr()
3992 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_get_smid_hpr()
4002 if (ioc->shost_recovery && ioc->pending_io_count) { in _base_recovery_check()
4003 ioc->pending_io_count = scsi_host_busy(ioc->shost); in _base_recovery_check()
4004 if (ioc->pending_io_count == 0) in _base_recovery_check()
4005 wake_up(&ioc->reset_wq); in _base_recovery_check()
4012 if (WARN_ON(st->smid == 0)) in mpt3sas_base_clear_st()
4014 st->cb_idx = 0xFF; in mpt3sas_base_clear_st()
4015 st->direct_io = 0; in mpt3sas_base_clear_st()
4016 st->scmd = NULL; in mpt3sas_base_clear_st()
4017 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); in mpt3sas_base_clear_st()
4018 st->smid = 0; in mpt3sas_base_clear_st()
4022 * mpt3sas_base_free_smid - put smid back on free_list
4032 if (smid < ioc->hi_priority_smid) { in mpt3sas_base_free_smid()
4044 memset(request, 0, ioc->request_sz); in mpt3sas_base_free_smid()
4048 ioc->io_queue_num[smid - 1] = 0; in mpt3sas_base_free_smid()
4052 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_free_smid()
4053 if (smid < ioc->internal_smid) { in mpt3sas_base_free_smid()
4054 /* hi-priority */ in mpt3sas_base_free_smid()
4055 i = smid - ioc->hi_priority_smid; in mpt3sas_base_free_smid()
4056 ioc->hpr_lookup[i].cb_idx = 0xFF; in mpt3sas_base_free_smid()
4057 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); in mpt3sas_base_free_smid()
4058 } else if (smid <= ioc->hba_queue_depth) { in mpt3sas_base_free_smid()
4060 i = smid - ioc->internal_smid; in mpt3sas_base_free_smid()
4061 ioc->internal_lookup[i].cb_idx = 0xFF; in mpt3sas_base_free_smid()
4062 list_add(&ioc->internal_lookup[i].tracker_list, in mpt3sas_base_free_smid()
4063 &ioc->internal_free_list); in mpt3sas_base_free_smid()
4065 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in mpt3sas_base_free_smid()
4069 * _base_mpi_ep_writeq - 32 bit write to MMIO
4091 * _base_writeq - 64 bit write to MMIO
4117 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
4129 if (smid < ioc->hi_priority_smid) in _base_set_and_get_msix_index()
4135 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); in _base_set_and_get_msix_index()
4136 return st->msix_io; in _base_set_and_get_msix_index()
4140 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
4155 mpi_req_iomem = (void __force *)ioc->chip + in _base_put_smid_mpi_ep_scsi_io()
4156 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); in _base_put_smid_mpi_ep_scsi_io()
4158 ioc->request_sz); in _base_put_smid_mpi_ep_scsi_io()
4164 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_mpi_ep_scsi_io()
4165 &ioc->scsi_lookup_lock); in _base_put_smid_mpi_ep_scsi_io()
4169 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
4186 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_scsi_io()
4187 &ioc->scsi_lookup_lock); in _base_put_smid_scsi_io()
4191 * _base_put_smid_fast_path - send fast path request to firmware
4209 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_fast_path()
4210 &ioc->scsi_lookup_lock); in _base_put_smid_fast_path()
4214 * _base_put_smid_hi_priority - send Task Management request to firmware
4227 if (ioc->is_mcpu_endpoint) { in _base_put_smid_hi_priority()
4231 mpi_req_iomem = (void __force *)ioc->chip in _base_put_smid_hi_priority()
4233 + (smid * ioc->request_sz); in _base_put_smid_hi_priority()
4235 ioc->request_sz); in _base_put_smid_hi_priority()
4246 if (ioc->is_mcpu_endpoint) in _base_put_smid_hi_priority()
4248 &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_hi_priority()
4249 &ioc->scsi_lookup_lock); in _base_put_smid_hi_priority()
4251 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_hi_priority()
4252 &ioc->scsi_lookup_lock); in _base_put_smid_hi_priority()
4256 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
4273 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in mpt3sas_base_put_smid_nvme_encap()
4274 &ioc->scsi_lookup_lock); in mpt3sas_base_put_smid_nvme_encap()
4278 * _base_put_smid_default - Default, primarily used for config pages
4289 if (ioc->is_mcpu_endpoint) { in _base_put_smid_default()
4294 mpi_req_iomem = (void __force *)ioc->chip + in _base_put_smid_default()
4295 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz); in _base_put_smid_default()
4297 ioc->request_sz); in _base_put_smid_default()
4305 if (ioc->is_mcpu_endpoint) in _base_put_smid_default()
4307 &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_default()
4308 &ioc->scsi_lookup_lock); in _base_put_smid_default()
4310 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, in _base_put_smid_default()
4311 &ioc->scsi_lookup_lock); in _base_put_smid_default()
4315 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4334 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); in _base_put_smid_scsi_io_atomic()
4338 * _base_put_smid_fast_path_atomic - send fast path request to firmware
4356 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); in _base_put_smid_fast_path_atomic()
4360 * _base_put_smid_hi_priority_atomic - send Task Management request to
4379 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); in _base_put_smid_hi_priority_atomic()
4383 * _base_put_smid_default_atomic - Default, primarily used for config pages
4400 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); in _base_put_smid_default_atomic()
4404 * _base_display_OEMs_branding - Display branding string
4410 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) in _base_display_OEMs_branding()
4413 switch (ioc->pdev->subsystem_vendor) { in _base_display_OEMs_branding()
4415 switch (ioc->pdev->device) { in _base_display_OEMs_branding()
4417 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4432 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4437 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4468 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4473 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4493 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4499 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4504 switch (ioc->pdev->device) { in _base_display_OEMs_branding()
4506 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4537 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4542 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4549 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4555 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4560 switch (ioc->pdev->device) { in _base_display_OEMs_branding()
4562 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4577 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4582 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4593 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4599 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4604 switch (ioc->pdev->device) { in _base_display_OEMs_branding()
4606 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4613 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4618 switch (ioc->pdev->subsystem_device) { in _base_display_OEMs_branding()
4637 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4643 ioc->pdev->subsystem_device); in _base_display_OEMs_branding()
4653 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4657 * Return: 0 for success, non-zero for failure.
4675 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { in _base_display_fwpkg_version()
4677 return -EAGAIN; in _base_display_fwpkg_version()
4681 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, in _base_display_fwpkg_version()
4687 return -ENOMEM; in _base_display_fwpkg_version()
4690 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); in _base_display_fwpkg_version()
4693 r = -EAGAIN; in _base_display_fwpkg_version()
4697 ioc->base_cmds.status = MPT3_CMD_PENDING; in _base_display_fwpkg_version()
4699 ioc->base_cmds.smid = smid; in _base_display_fwpkg_version()
4701 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD; in _base_display_fwpkg_version()
4702 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH; in _base_display_fwpkg_version()
4703 mpi_request->ImageSize = cpu_to_le32(data_length); in _base_display_fwpkg_version()
4704 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, in _base_display_fwpkg_version()
4706 init_completion(&ioc->base_cmds.done); in _base_display_fwpkg_version()
4707 ioc->put_smid_default(ioc, smid); in _base_display_fwpkg_version()
4709 wait_for_completion_timeout(&ioc->base_cmds.done, in _base_display_fwpkg_version()
4712 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { in _base_display_fwpkg_version()
4719 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) { in _base_display_fwpkg_version()
4720 memcpy(&mpi_reply, ioc->base_cmds.reply, in _base_display_fwpkg_version()
4726 if (le32_to_cpu(fw_img_hdr->Signature) == in _base_display_fwpkg_version()
4733 cmp_img_hdr->ApplicationSpecific); in _base_display_fwpkg_version()
4737 fw_img_hdr->PackageVersion.Word); in _base_display_fwpkg_version()
4751 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in _base_display_fwpkg_version()
4754 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, in _base_display_fwpkg_version()
4757 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) in _base_display_fwpkg_version()
4758 return -EFAULT; in _base_display_fwpkg_version()
4760 return -EFAULT; in _base_display_fwpkg_version()
4761 r = -EAGAIN; in _base_display_fwpkg_version()
4767 * _base_display_ioc_capabilities - Display IOC's capabilities.
4777 strncpy(desc, ioc->manu_pg0.ChipName, 16); in _base_display_ioc_capabilities()
4780 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, in _base_display_ioc_capabilities()
4781 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, in _base_display_ioc_capabilities()
4782 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, in _base_display_ioc_capabilities()
4783 ioc->facts.FWVersion.Word & 0x000000FF, in _base_display_ioc_capabilities()
4784 ioc->pdev->revision); in _base_display_ioc_capabilities()
4788 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { in _base_display_ioc_capabilities()
4795 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { in _base_display_ioc_capabilities()
4800 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { in _base_display_ioc_capabilities()
4808 if (!ioc->hide_ir_msg) { in _base_display_ioc_capabilities()
4809 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4816 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { in _base_display_ioc_capabilities()
4821 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { in _base_display_ioc_capabilities()
4826 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4832 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { in _base_display_ioc_capabilities()
4837 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4843 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4849 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4855 if (ioc->facts.IOCCapabilities & in _base_display_ioc_capabilities()
4861 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); in _base_display_ioc_capabilities()
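The version string printed above is just the four bytes of facts.FWVersion.Word. A minimal sketch of that unpacking, using a hypothetical value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word = 0x10000A00;	/* hypothetical FWVersion.Word */

	printf("FW %02u.%02u.%02u.%02u\n",
	       (word & 0xFF000000) >> 24,	/* major */
	       (word & 0x00FF0000) >> 16,	/* minor */
	       (word & 0x0000FF00) >> 8,	/* unit  */
	       word & 0x000000FF);		/* dev   */
	return 0;
}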
4871 * mpt3sas_base_update_missing_delay - change the missing delay timers
4919 dmd = sas_iounit_pg1->ReportDeviceMissingDelay; in mpt3sas_base_update_missing_delay()
4932 sas_iounit_pg1->ReportDeviceMissingDelay = dmd; in mpt3sas_base_update_missing_delay()
4935 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; in mpt3sas_base_update_missing_delay()
4936 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; in mpt3sas_base_update_missing_delay()
4951 ioc->device_missing_delay = dmd_new; in mpt3sas_base_update_missing_delay()
4952 ioc->io_missing_delay = io_missing_delay; in mpt3sas_base_update_missing_delay()
4960 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4974 rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy); in _base_update_ioc_page1_inlinewith_perf_mode()
4977 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t)); in _base_update_ioc_page1_inlinewith_perf_mode()
4982 if (ioc->high_iops_queues) { in _base_update_ioc_page1_inlinewith_perf_mode()
4997 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1)); in _base_update_ioc_page1_inlinewith_perf_mode()
5036 * _base_get_event_diag_triggers - get event diag trigger values from
5071 ioc->diag_trigger_event.ValidEntries = count; in _base_get_event_diag_triggers()
5073 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0]; in _base_get_event_diag_triggers()
5076 event_tg->EventValue = le16_to_cpu( in _base_get_event_diag_triggers()
5077 mpi_event_tg->MPIEventCode); in _base_get_event_diag_triggers()
5078 event_tg->LogEntryQualifier = le16_to_cpu( in _base_get_event_diag_triggers()
5079 mpi_event_tg->MPIEventCodeSpecific); in _base_get_event_diag_triggers()
5088 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
5123 ioc->diag_trigger_scsi.ValidEntries = count; in _base_get_scsi_diag_triggers()
5125 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0]; in _base_get_scsi_diag_triggers()
5128 scsi_tg->ASCQ = mpi_scsi_tg->ASCQ; in _base_get_scsi_diag_triggers()
5129 scsi_tg->ASC = mpi_scsi_tg->ASC; in _base_get_scsi_diag_triggers()
5130 scsi_tg->SenseKey = mpi_scsi_tg->SenseKey; in _base_get_scsi_diag_triggers()
5140 * _base_get_mpi_diag_triggers - get mpi diag trigger values from
5175 ioc->diag_trigger_mpi.ValidEntries = count; in _base_get_mpi_diag_triggers()
5177 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0]; in _base_get_mpi_diag_triggers()
5181 status_tg->IOCStatus = le16_to_cpu( in _base_get_mpi_diag_triggers()
5182 mpi_status_tg->IOCStatus); in _base_get_mpi_diag_triggers()
5183 status_tg->IocLogInfo = le32_to_cpu( in _base_get_mpi_diag_triggers()
5184 mpi_status_tg->LogInfo); in _base_get_mpi_diag_triggers()
5194 * _base_get_master_diag_triggers - get master diag trigger values from
5224 ioc->diag_trigger_master.MasterData |= in _base_get_master_diag_triggers()
5231 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
5237 * otherwise returns %-EFAULT if driver trigger pages are not supported by FW or
5258 return -EFAULT; in _base_check_for_trigger_pages_support()
5265 * _base_get_diag_triggers - Retrieve diag trigger values from
5281 ioc->diag_trigger_master.MasterData = in _base_get_diag_triggers()
5286 if (r == -EAGAIN) in _base_get_diag_triggers()
5295 ioc->supports_trigger_pages = 1; in _base_get_diag_triggers()
5343 * _base_update_diag_trigger_pages - Update the driver trigger pages after
5354 if (ioc->diag_trigger_master.MasterData) in _base_update_diag_trigger_pages()
5356 &ioc->diag_trigger_master, 1); in _base_update_diag_trigger_pages()
5358 if (ioc->diag_trigger_event.ValidEntries) in _base_update_diag_trigger_pages()
5360 &ioc->diag_trigger_event, 1); in _base_update_diag_trigger_pages()
5362 if (ioc->diag_trigger_scsi.ValidEntries) in _base_update_diag_trigger_pages()
5364 &ioc->diag_trigger_scsi, 1); in _base_update_diag_trigger_pages()
5366 if (ioc->diag_trigger_mpi.ValidEntries) in _base_update_diag_trigger_pages()
5368 &ioc->diag_trigger_mpi, 1); in _base_update_diag_trigger_pages()
5372 * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices.
5373 * - On failure set default QD values.
5376 * Returns 0 for success, non-zero for failure.
5388 ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; in _base_assign_fw_reported_qd()
5389 ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; in _base_assign_fw_reported_qd()
5390 ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; in _base_assign_fw_reported_qd()
5391 ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; in _base_assign_fw_reported_qd()
5392 if (!ioc->is_gen35_ioc) in _base_assign_fw_reported_qd()
5399 ioc->name, __FILE__, __LINE__, __func__); in _base_assign_fw_reported_qd()
5406 ioc->name, __FILE__, __LINE__, __func__); in _base_assign_fw_reported_qd()
5410 depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); in _base_assign_fw_reported_qd()
5411 ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); in _base_assign_fw_reported_qd()
5413 depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); in _base_assign_fw_reported_qd()
5414 ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); in _base_assign_fw_reported_qd()
5416 depth = sas_iounit_pg1->SATAMaxQDepth; in _base_assign_fw_reported_qd()
5417 ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH); in _base_assign_fw_reported_qd()
5424 ioc->name, __FILE__, __LINE__, __func__); in _base_assign_fw_reported_qd()
5427 ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? in _base_assign_fw_reported_qd()
5433 ioc->max_wideport_qd, ioc->max_narrowport_qd, in _base_assign_fw_reported_qd()
5434 ioc->max_sata_qd, ioc->max_nvme_qd)); in _base_assign_fw_reported_qd()
5440 * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
5444 * Return: 0 for success, non-zero for failure.
5450 int r = -EINVAL; in mpt3sas_atto_validate_nvram()
5461 while (len--) in mpt3sas_atto_validate_nvram()
5469 s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr; in mpt3sas_atto_validate_nvram()
5471 if (n->Signature[0] != 'E' in mpt3sas_atto_validate_nvram()
5472 || n->Signature[1] != 'S' in mpt3sas_atto_validate_nvram()
5473 || n->Signature[2] != 'A' in mpt3sas_atto_validate_nvram()
5474 || n->Signature[3] != 'S') in mpt3sas_atto_validate_nvram()
5476 else if (n->Version > ATTO_SASNVR_VERSION) in mpt3sas_atto_validate_nvram()
5478 else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1)) in mpt3sas_atto_validate_nvram()
5479 || s1->b[0] != 0x50 in mpt3sas_atto_validate_nvram()
5480 || s1->b[1] != 0x01 in mpt3sas_atto_validate_nvram()
5481 || s1->b[2] != 0x08 in mpt3sas_atto_validate_nvram()
5482 || (s1->b[3] & 0xF0) != 0x60 in mpt3sas_atto_validate_nvram()
5483 || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) { in mpt3sas_atto_validate_nvram()
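A standalone sketch of part of the SAS-address shape test performed above: the WWN must begin 0x50 0x01 0x08 with a 0x6 upper nibble in the fourth byte, and the remaining unit portion must not be all zero. The buffer layout and sample values below are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int sas_addr_looks_valid(const uint8_t b[8])
{
	uint32_t low;

	memcpy(&low, &b[4], sizeof(low));	/* low 4 bytes of the WWN */
	if (b[0] != 0x50 || b[1] != 0x01 || b[2] != 0x08 ||
	    (b[3] & 0xF0) != 0x60)
		return 0;
	if (((b[3] & 0x0F) | low) == 0)		/* all-zero unit portion */
		return 0;
	return 1;
}

int main(void)
{
	uint8_t good[8] = { 0x50, 0x01, 0x08, 0x60, 0x12, 0x34, 0x56, 0x78 };
	uint8_t bad[8]  = { 0x50, 0x01, 0x08, 0x60, 0, 0, 0, 0 };

	printf("%d %d\n", sas_addr_looks_valid(good), sas_addr_looks_valid(bad));
	return 0;
}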
5491 * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
5495 * Return: 0 for success, non-zero for failure.
5518 addr = *((__be64 *) nvram->SasAddr); in mpt3sas_atto_get_sas_addr()
5519 sas_addr->q = cpu_to_le64(be64_to_cpu(addr)); in mpt3sas_atto_get_sas_addr()
5524 * mpt3sas_atto_init - perform initialization for ATTO branded

5528 * Return: 0 for success, non-zero for failure.
5557 return -ENOMEM; in mpt3sas_atto_init()
5571 for (ix = 0; ix < bios_pg4->NumPhys; ix++) { in mpt3sas_atto_init()
5574 bios_pg4->Phy[ix].ReassignmentWWID = temp.q; in mpt3sas_atto_init()
5575 bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q; in mpt3sas_atto_init()
5585 * _base_static_config_pages - static start of day config pages
5595 ioc->nvme_abort_timeout = 30; in _base_static_config_pages()
5598 &ioc->manu_pg0); in _base_static_config_pages()
5601 if (ioc->ir_firmware) { in _base_static_config_pages()
5603 &ioc->manu_pg10); in _base_static_config_pages()
5608 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { in _base_static_config_pages()
5619 &ioc->manu_pg11); in _base_static_config_pages()
5622 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { in _base_static_config_pages()
5624 ioc->name); in _base_static_config_pages()
5625 ioc->manu_pg11.EEDPTagMode &= ~0x3; in _base_static_config_pages()
5626 ioc->manu_pg11.EEDPTagMode |= 0x1; in _base_static_config_pages()
5628 &ioc->manu_pg11); in _base_static_config_pages()
5630 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK) in _base_static_config_pages()
5631 ioc->tm_custom_handling = 1; in _base_static_config_pages()
5633 ioc->tm_custom_handling = 0; in _base_static_config_pages()
5634 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT) in _base_static_config_pages()
5635 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT; in _base_static_config_pages()
5636 else if (ioc->manu_pg11.NVMeAbortTO > in _base_static_config_pages()
5638 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT; in _base_static_config_pages()
5640 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO; in _base_static_config_pages()
5642 ioc->time_sync_interval = in _base_static_config_pages()
5643 ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK; in _base_static_config_pages()
5644 if (ioc->time_sync_interval) { in _base_static_config_pages()
5645 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK) in _base_static_config_pages()
5646 ioc->time_sync_interval = in _base_static_config_pages()
5647 ioc->time_sync_interval * SECONDS_PER_HOUR; in _base_static_config_pages()
5649 ioc->time_sync_interval = in _base_static_config_pages()
5650 ioc->time_sync_interval * SECONDS_PER_MIN; in _base_static_config_pages()
5652 "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n", in _base_static_config_pages()
5653 ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval & in _base_static_config_pages()
5656 if (ioc->is_gen35_ioc) in _base_static_config_pages()
5658 "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n"); in _base_static_config_pages()
5667 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) in _base_static_config_pages()
5668 ioc->bios_pg3.BiosVersion = 0; in _base_static_config_pages()
5670 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); in _base_static_config_pages()
5673 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); in _base_static_config_pages()
5678 rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); in _base_static_config_pages()
5681 rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); in _base_static_config_pages()
5684 rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); in _base_static_config_pages()
5687 rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); in _base_static_config_pages()
5696 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); in _base_static_config_pages()
5697 if ((ioc->facts.IOCCapabilities & in _base_static_config_pages()
5704 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); in _base_static_config_pages()
5705 rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); in _base_static_config_pages()
5709 if (ioc->iounit_pg8.NumSensors) in _base_static_config_pages()
5710 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; in _base_static_config_pages()
5711 if (ioc->is_aero_ioc) { in _base_static_config_pages()
5716 if (ioc->is_gen35_ioc) { in _base_static_config_pages()
5717 if (ioc->is_driver_loading) { in _base_static_config_pages()
5726 * - If previous FW has not supported driver trigger in _base_static_config_pages()
5729 * - If previous FW has supported driver trigger pages in _base_static_config_pages()
5734 if (!ioc->supports_trigger_pages && tg_flags != -EFAULT) in _base_static_config_pages()
5736 else if (ioc->supports_trigger_pages && in _base_static_config_pages()
5737 tg_flags == -EFAULT) in _base_static_config_pages()
5738 ioc->supports_trigger_pages = 0; in _base_static_config_pages()
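Earlier in this function the Manufacturing Page 11 TimeSyncInterval field is converted into seconds. A sketch of that decode; the 0x7F count / 0x80 unit split below is an assumption about the MPT3SAS_TIMESYNC_* mask values and should be checked against the driver headers:

#include <stdio.h>

#define TIMESYNC_MASK       0x7F	/* assumed value of MPT3SAS_TIMESYNC_MASK */
#define TIMESYNC_UNIT_MASK  0x80	/* assumed value of MPT3SAS_TIMESYNC_UNIT_MASK */
#define SECONDS_PER_MIN     60
#define SECONDS_PER_HOUR    3600

int main(void)
{
	unsigned char raw = 0x82;	/* hypothetical: unit bit set, count = 2 */
	unsigned int seconds = raw & TIMESYNC_MASK;

	seconds *= (raw & TIMESYNC_UNIT_MASK) ? SECONDS_PER_HOUR
					      : SECONDS_PER_MIN;
	printf("Driver-FW TimeSync interval: %u seconds\n", seconds);
	return 0;
}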
5745 * mpt3sas_free_enclosure_list - release memory
5757 enclosure_dev_next, &ioc->enclosure_list, list) { in mpt3sas_free_enclosure_list()
5758 list_del(&enclosure_dev->list); in mpt3sas_free_enclosure_list()
5764 * _base_release_memory_pools - release memory
5776 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; in _base_release_memory_pools()
5780 if (ioc->request) { in _base_release_memory_pools()
5781 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, in _base_release_memory_pools()
5782 ioc->request, ioc->request_dma); in _base_release_memory_pools()
5785 ioc->request)); in _base_release_memory_pools()
5786 ioc->request = NULL; in _base_release_memory_pools()
5789 if (ioc->sense) { in _base_release_memory_pools()
5790 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); in _base_release_memory_pools()
5791 dma_pool_destroy(ioc->sense_dma_pool); in _base_release_memory_pools()
5794 ioc->sense)); in _base_release_memory_pools()
5795 ioc->sense = NULL; in _base_release_memory_pools()
5798 if (ioc->reply) { in _base_release_memory_pools()
5799 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); in _base_release_memory_pools()
5800 dma_pool_destroy(ioc->reply_dma_pool); in _base_release_memory_pools()
5803 ioc->reply)); in _base_release_memory_pools()
5804 ioc->reply = NULL; in _base_release_memory_pools()
5807 if (ioc->reply_free) { in _base_release_memory_pools()
5808 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, in _base_release_memory_pools()
5809 ioc->reply_free_dma); in _base_release_memory_pools()
5810 dma_pool_destroy(ioc->reply_free_dma_pool); in _base_release_memory_pools()
5813 ioc->reply_free)); in _base_release_memory_pools()
5814 ioc->reply_free = NULL; in _base_release_memory_pools()
5817 if (ioc->reply_post) { in _base_release_memory_pools()
5823 if (ioc->reply_post[i].reply_post_free) { in _base_release_memory_pools()
5825 ioc->reply_post_free_dma_pool, in _base_release_memory_pools()
5826 ioc->reply_post[i].reply_post_free, in _base_release_memory_pools()
5827 ioc->reply_post[i].reply_post_free_dma); in _base_release_memory_pools()
5830 ioc->reply_post[i].reply_post_free)); in _base_release_memory_pools()
5831 ioc->reply_post[i].reply_post_free = in _base_release_memory_pools()
5834 --dma_alloc_count; in _base_release_memory_pools()
5837 dma_pool_destroy(ioc->reply_post_free_dma_pool); in _base_release_memory_pools()
5838 if (ioc->reply_post_free_array && in _base_release_memory_pools()
5839 ioc->rdpq_array_enable) { in _base_release_memory_pools()
5840 dma_pool_free(ioc->reply_post_free_array_dma_pool, in _base_release_memory_pools()
5841 ioc->reply_post_free_array, in _base_release_memory_pools()
5842 ioc->reply_post_free_array_dma); in _base_release_memory_pools()
5843 ioc->reply_post_free_array = NULL; in _base_release_memory_pools()
5845 dma_pool_destroy(ioc->reply_post_free_array_dma_pool); in _base_release_memory_pools()
5846 kfree(ioc->reply_post); in _base_release_memory_pools()
5849 if (ioc->pcie_sgl_dma_pool) { in _base_release_memory_pools()
5850 for (i = 0; i < ioc->scsiio_depth; i++) { in _base_release_memory_pools()
5851 dma_pool_free(ioc->pcie_sgl_dma_pool, in _base_release_memory_pools()
5852 ioc->pcie_sg_lookup[i].pcie_sgl, in _base_release_memory_pools()
5853 ioc->pcie_sg_lookup[i].pcie_sgl_dma); in _base_release_memory_pools()
5854 ioc->pcie_sg_lookup[i].pcie_sgl = NULL; in _base_release_memory_pools()
5856 dma_pool_destroy(ioc->pcie_sgl_dma_pool); in _base_release_memory_pools()
5858 kfree(ioc->pcie_sg_lookup); in _base_release_memory_pools()
5859 ioc->pcie_sg_lookup = NULL; in _base_release_memory_pools()
5861 if (ioc->config_page) { in _base_release_memory_pools()
5864 ioc->config_page)); in _base_release_memory_pools()
5865 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, in _base_release_memory_pools()
5866 ioc->config_page, ioc->config_page_dma); in _base_release_memory_pools()
5869 kfree(ioc->hpr_lookup); in _base_release_memory_pools()
5870 ioc->hpr_lookup = NULL; in _base_release_memory_pools()
5871 kfree(ioc->internal_lookup); in _base_release_memory_pools()
5872 ioc->internal_lookup = NULL; in _base_release_memory_pools()
5873 if (ioc->chain_lookup) { in _base_release_memory_pools()
5874 for (i = 0; i < ioc->scsiio_depth; i++) { in _base_release_memory_pools()
5875 for (j = ioc->chains_per_prp_buffer; in _base_release_memory_pools()
5876 j < ioc->chains_needed_per_io; j++) { in _base_release_memory_pools()
5877 ct = &ioc->chain_lookup[i].chains_per_smid[j]; in _base_release_memory_pools()
5878 if (ct && ct->chain_buffer) in _base_release_memory_pools()
5879 dma_pool_free(ioc->chain_dma_pool, in _base_release_memory_pools()
5880 ct->chain_buffer, in _base_release_memory_pools()
5881 ct->chain_buffer_dma); in _base_release_memory_pools()
5883 kfree(ioc->chain_lookup[i].chains_per_smid); in _base_release_memory_pools()
5885 dma_pool_destroy(ioc->chain_dma_pool); in _base_release_memory_pools()
5886 kfree(ioc->chain_lookup); in _base_release_memory_pools()
5887 ioc->chain_lookup = NULL; in _base_release_memory_pools()
5890 kfree(ioc->io_queue_num); in _base_release_memory_pools()
5891 ioc->io_queue_num = NULL; in _base_release_memory_pools()
5895 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
5908 end_address = start_address + pool_sz - 1; in mpt3sas_check_same_4gb_region()
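A standalone sketch of the test this helper performs: an allocation is acceptable only if its first and last bytes share the same upper 32 address bits, i.e. the pool does not straddle a 4GB boundary:

#include <stdint.h>
#include <stdio.h>

static int same_4gb_region(uint64_t start, uint32_t pool_sz)
{
	uint64_t end = start + pool_sz - 1;

	return (start >> 32) == (end >> 32);
}

int main(void)
{
	printf("%d\n", same_4gb_region(0x0FFFFF000ull, 0x2000));	/* crosses: 0 */
	printf("%d\n", same_4gb_region(0x100000000ull, 0x2000));	/* fits:    1 */
	return 0;
}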
5917 * _base_reduce_hba_queue_depth - Retry with reduced queue depth
5920 * Return: 0 for success, non-zero for failure.
5927 if ((ioc->hba_queue_depth - reduce_sz) > in _base_reduce_hba_queue_depth()
5928 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { in _base_reduce_hba_queue_depth()
5929 ioc->hba_queue_depth -= reduce_sz; in _base_reduce_hba_queue_depth()
5932 return -ENOMEM; in _base_reduce_hba_queue_depth()
5936 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
5941 * Return: 0 for success, non-zero for failure.
5950 ioc->pcie_sgl_dma_pool = in _base_allocate_pcie_sgl_pool()
5951 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, in _base_allocate_pcie_sgl_pool()
5952 ioc->page_size, 0); in _base_allocate_pcie_sgl_pool()
5953 if (!ioc->pcie_sgl_dma_pool) { in _base_allocate_pcie_sgl_pool()
5955 return -ENOMEM; in _base_allocate_pcie_sgl_pool()
5958 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; in _base_allocate_pcie_sgl_pool()
5959 ioc->chains_per_prp_buffer = in _base_allocate_pcie_sgl_pool()
5960 min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); in _base_allocate_pcie_sgl_pool()
5961 for (i = 0; i < ioc->scsiio_depth; i++) { in _base_allocate_pcie_sgl_pool()
5962 ioc->pcie_sg_lookup[i].pcie_sgl = in _base_allocate_pcie_sgl_pool()
5963 dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, in _base_allocate_pcie_sgl_pool()
5964 &ioc->pcie_sg_lookup[i].pcie_sgl_dma); in _base_allocate_pcie_sgl_pool()
5965 if (!ioc->pcie_sg_lookup[i].pcie_sgl) { in _base_allocate_pcie_sgl_pool()
5967 return -EAGAIN; in _base_allocate_pcie_sgl_pool()
5971 ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) { in _base_allocate_pcie_sgl_pool()
5973 ioc->pcie_sg_lookup[i].pcie_sgl, in _base_allocate_pcie_sgl_pool()
5975 ioc->pcie_sg_lookup[i].pcie_sgl_dma); in _base_allocate_pcie_sgl_pool()
5976 ioc->use_32bit_dma = true; in _base_allocate_pcie_sgl_pool()
5977 return -EAGAIN; in _base_allocate_pcie_sgl_pool()
5980 for (j = 0; j < ioc->chains_per_prp_buffer; j++) { in _base_allocate_pcie_sgl_pool()
5981 ct = &ioc->chain_lookup[i].chains_per_smid[j]; in _base_allocate_pcie_sgl_pool()
5982 ct->chain_buffer = in _base_allocate_pcie_sgl_pool()
5983 ioc->pcie_sg_lookup[i].pcie_sgl + in _base_allocate_pcie_sgl_pool()
5984 (j * ioc->chain_segment_sz); in _base_allocate_pcie_sgl_pool()
5985 ct->chain_buffer_dma = in _base_allocate_pcie_sgl_pool()
5986 ioc->pcie_sg_lookup[i].pcie_sgl_dma + in _base_allocate_pcie_sgl_pool()
5987 (j * ioc->chain_segment_sz); in _base_allocate_pcie_sgl_pool()
5992 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); in _base_allocate_pcie_sgl_pool()
5995 ioc->chains_per_prp_buffer)); in _base_allocate_pcie_sgl_pool()
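A sketch of the carving done above: each smid's PRP/SGL page also provides its first chains_per_prp_buffer chain segments, laid out back-to-back at chain_segment_sz offsets so they need no separate pool allocation. Sizes below are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t prp_sz = 4096, chain_segment_sz = 128;
	size_t chains_needed_per_io = 19;
	size_t chains_per_prp = prp_sz / chain_segment_sz;	/* 32 */
	char *prp_buf = malloc(prp_sz);

	if (!prp_buf)
		return 1;
	if (chains_per_prp > chains_needed_per_io)
		chains_per_prp = chains_needed_per_io;
	for (size_t j = 0; j < chains_per_prp; j++) {
		char *chain = prp_buf + j * chain_segment_sz;

		printf("chain[%zu] at offset %zu\n", j,
		       (size_t)(chain - prp_buf));
	}
	free(prp_buf);
	return 0;
}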
6000 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
6005 * Return: 0 for success, non-zero for failure.
6013 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, in _base_allocate_chain_dma_pool()
6014 ioc->chain_segment_sz, 16, 0); in _base_allocate_chain_dma_pool()
6015 if (!ioc->chain_dma_pool) in _base_allocate_chain_dma_pool()
6016 return -ENOMEM; in _base_allocate_chain_dma_pool()
6018 for (i = 0; i < ioc->scsiio_depth; i++) { in _base_allocate_chain_dma_pool()
6019 for (j = ioc->chains_per_prp_buffer; in _base_allocate_chain_dma_pool()
6020 j < ioc->chains_needed_per_io; j++) { in _base_allocate_chain_dma_pool()
6021 ctr = &ioc->chain_lookup[i].chains_per_smid[j]; in _base_allocate_chain_dma_pool()
6022 ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, in _base_allocate_chain_dma_pool()
6023 GFP_KERNEL, &ctr->chain_buffer_dma); in _base_allocate_chain_dma_pool()
6024 if (!ctr->chain_buffer) in _base_allocate_chain_dma_pool()
6025 return -EAGAIN; in _base_allocate_chain_dma_pool()
6027 ctr->chain_buffer_dma, ioc->chain_segment_sz)) { in _base_allocate_chain_dma_pool()
6030 ctr->chain_buffer, in _base_allocate_chain_dma_pool()
6031 (unsigned long long)ctr->chain_buffer_dma); in _base_allocate_chain_dma_pool()
6032 ioc->use_32bit_dma = true; in _base_allocate_chain_dma_pool()
6033 return -EAGAIN; in _base_allocate_chain_dma_pool()
6039 ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * in _base_allocate_chain_dma_pool()
6040 (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * in _base_allocate_chain_dma_pool()
6041 ioc->chain_segment_sz))/1024)); in _base_allocate_chain_dma_pool()
6046 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
6050 * Return: 0 for success, non-zero for failure.
6055 ioc->sense_dma_pool = in _base_allocate_sense_dma_pool()
6056 dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); in _base_allocate_sense_dma_pool()
6057 if (!ioc->sense_dma_pool) in _base_allocate_sense_dma_pool()
6058 return -ENOMEM; in _base_allocate_sense_dma_pool()
6059 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, in _base_allocate_sense_dma_pool()
6060 GFP_KERNEL, &ioc->sense_dma); in _base_allocate_sense_dma_pool()
6061 if (!ioc->sense) in _base_allocate_sense_dma_pool()
6062 return -EAGAIN; in _base_allocate_sense_dma_pool()
6063 if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) { in _base_allocate_sense_dma_pool()
6066 ioc->sense, (unsigned long long) ioc->sense_dma)); in _base_allocate_sense_dma_pool()
6067 ioc->use_32bit_dma = true; in _base_allocate_sense_dma_pool()
6068 return -EAGAIN; in _base_allocate_sense_dma_pool()
6071 "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", in _base_allocate_sense_dma_pool()
6072 ioc->sense, (unsigned long long)ioc->sense_dma, in _base_allocate_sense_dma_pool()
6073 ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); in _base_allocate_sense_dma_pool()
6078 * _base_allocate_reply_pool - Allocating DMA'able memory
6082 * Return: 0 for success, non-zero for failure.
6088 ioc->reply_dma_pool = dma_pool_create("reply pool", in _base_allocate_reply_pool()
6089 &ioc->pdev->dev, sz, 4, 0); in _base_allocate_reply_pool()
6090 if (!ioc->reply_dma_pool) in _base_allocate_reply_pool()
6091 return -ENOMEM; in _base_allocate_reply_pool()
6092 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, in _base_allocate_reply_pool()
6093 &ioc->reply_dma); in _base_allocate_reply_pool()
6094 if (!ioc->reply) in _base_allocate_reply_pool()
6095 return -EAGAIN; in _base_allocate_reply_pool()
6096 if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) { in _base_allocate_reply_pool()
6099 ioc->reply, (unsigned long long) ioc->reply_dma)); in _base_allocate_reply_pool()
6100 ioc->use_32bit_dma = true; in _base_allocate_reply_pool()
6101 return -EAGAIN; in _base_allocate_reply_pool()
6103 ioc->reply_dma_min_address = (u32)(ioc->reply_dma); in _base_allocate_reply_pool()
6104 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; in _base_allocate_reply_pool()
6106 "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n", in _base_allocate_reply_pool()
6107 ioc->reply, (unsigned long long)ioc->reply_dma, in _base_allocate_reply_pool()
6108 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); in _base_allocate_reply_pool()
6113 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
6117 * Return: 0 for success, non-zero for failure.
6123 ioc->reply_free_dma_pool = dma_pool_create( in _base_allocate_reply_free_dma_pool()
6124 "reply_free pool", &ioc->pdev->dev, sz, 16, 0); in _base_allocate_reply_free_dma_pool()
6125 if (!ioc->reply_free_dma_pool) in _base_allocate_reply_free_dma_pool()
6126 return -ENOMEM; in _base_allocate_reply_free_dma_pool()
6127 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, in _base_allocate_reply_free_dma_pool()
6128 GFP_KERNEL, &ioc->reply_free_dma); in _base_allocate_reply_free_dma_pool()
6129 if (!ioc->reply_free) in _base_allocate_reply_free_dma_pool()
6130 return -EAGAIN; in _base_allocate_reply_free_dma_pool()
6131 if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) { in _base_allocate_reply_free_dma_pool()
6134 ioc->reply_free, (unsigned long long) ioc->reply_free_dma)); in _base_allocate_reply_free_dma_pool()
6135 ioc->use_32bit_dma = true; in _base_allocate_reply_free_dma_pool()
6136 return -EAGAIN; in _base_allocate_reply_free_dma_pool()
6138 memset(ioc->reply_free, 0, sz); in _base_allocate_reply_free_dma_pool()
6141 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); in _base_allocate_reply_free_dma_pool()
6144 (unsigned long long)ioc->reply_free_dma)); in _base_allocate_reply_free_dma_pool()
6149 * _base_allocate_reply_post_free_array - Allocating DMA'able memory
6153 * Return: 0 for success, non-zero for failure.
6160 ioc->reply_post_free_array_dma_pool = in _base_allocate_reply_post_free_array()
6162 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); in _base_allocate_reply_post_free_array()
6163 if (!ioc->reply_post_free_array_dma_pool) in _base_allocate_reply_post_free_array()
6164 return -ENOMEM; in _base_allocate_reply_post_free_array()
6165 ioc->reply_post_free_array = in _base_allocate_reply_post_free_array()
6166 dma_pool_alloc(ioc->reply_post_free_array_dma_pool, in _base_allocate_reply_post_free_array()
6167 GFP_KERNEL, &ioc->reply_post_free_array_dma); in _base_allocate_reply_post_free_array()
6168 if (!ioc->reply_post_free_array) in _base_allocate_reply_post_free_array()
6169 return -EAGAIN; in _base_allocate_reply_post_free_array()
6170 if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma, in _base_allocate_reply_post_free_array()
6174 ioc->reply_free, in _base_allocate_reply_post_free_array()
6175 (unsigned long long) ioc->reply_free_dma)); in _base_allocate_reply_post_free_array()
6176 ioc->use_32bit_dma = true; in _base_allocate_reply_post_free_array()
6177 return -EAGAIN; in _base_allocate_reply_post_free_array()
6182 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
6186 * Return: 0 for success, non-zero for failure.
6193 int reply_post_free_sz = ioc->reply_post_queue_depth * in base_alloc_rdpq_dma_pool()
6195 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; in base_alloc_rdpq_dma_pool()
6197 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct), in base_alloc_rdpq_dma_pool()
6199 if (!ioc->reply_post) in base_alloc_rdpq_dma_pool()
6200 return -ENOMEM; in base_alloc_rdpq_dma_pool()
6202 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and in base_alloc_rdpq_dma_pool()
6203 * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should in base_alloc_rdpq_dma_pool()
6205 * upper 32-bits in their memory address. so here driver is allocating in base_alloc_rdpq_dma_pool()
6212 ioc->reply_post_free_dma_pool = in base_alloc_rdpq_dma_pool()
6214 &ioc->pdev->dev, sz, 16, 0); in base_alloc_rdpq_dma_pool()
6215 if (!ioc->reply_post_free_dma_pool) in base_alloc_rdpq_dma_pool()
6216 return -ENOMEM; in base_alloc_rdpq_dma_pool()
6219 ioc->reply_post[i].reply_post_free = in base_alloc_rdpq_dma_pool()
6220 dma_pool_zalloc(ioc->reply_post_free_dma_pool, in base_alloc_rdpq_dma_pool()
6222 &ioc->reply_post[i].reply_post_free_dma); in base_alloc_rdpq_dma_pool()
6223 if (!ioc->reply_post[i].reply_post_free) in base_alloc_rdpq_dma_pool()
6224 return -ENOMEM; in base_alloc_rdpq_dma_pool()
6235 ioc->reply_post[i].reply_post_free_dma, sz)) { in base_alloc_rdpq_dma_pool()
6239 ioc->reply_post[i].reply_post_free, in base_alloc_rdpq_dma_pool()
6241 ioc->reply_post[i].reply_post_free_dma)); in base_alloc_rdpq_dma_pool()
6242 return -EAGAIN; in base_alloc_rdpq_dma_pool()
6244 dma_alloc_count--; in base_alloc_rdpq_dma_pool()
6247 ioc->reply_post[i].reply_post_free = in base_alloc_rdpq_dma_pool()
6249 ((long)ioc->reply_post[i-1].reply_post_free in base_alloc_rdpq_dma_pool()
6251 ioc->reply_post[i].reply_post_free_dma = in base_alloc_rdpq_dma_pool()
6253 (ioc->reply_post[i-1].reply_post_free_dma + in base_alloc_rdpq_dma_pool()
6261 * _base_allocate_memory_pools - allocate start of day memory pools
6285 facts = &ioc->facts; in _base_allocate_memory_pools()
6288 if (max_sgl_entries != -1) in _base_allocate_memory_pools()
6291 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) in _base_allocate_memory_pools()
6302 if (ioc->is_mcpu_endpoint) in _base_allocate_memory_pools()
6303 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS; in _base_allocate_memory_pools()
6313 ioc->shost->sg_tablesize = sg_tablesize; in _base_allocate_memory_pools()
6316 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)), in _base_allocate_memory_pools()
6317 (facts->RequestCredit / 4)); in _base_allocate_memory_pools()
6318 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) { in _base_allocate_memory_pools()
6319 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT + in _base_allocate_memory_pools()
6322 facts->RequestCredit); in _base_allocate_memory_pools()
6323 return -ENOMEM; in _base_allocate_memory_pools()
6325 ioc->internal_depth = 10; in _base_allocate_memory_pools()
6328 ioc->hi_priority_depth = ioc->internal_depth - (5); in _base_allocate_memory_pools()
6330 if (max_queue_depth != -1 && max_queue_depth != 0) { in _base_allocate_memory_pools()
6332 ioc->internal_depth, facts->RequestCredit); in _base_allocate_memory_pools()
6336 max_request_credit = min_t(u16, facts->RequestCredit, in _base_allocate_memory_pools()
6337 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth)); in _base_allocate_memory_pools()
6339 max_request_credit = min_t(u16, facts->RequestCredit, in _base_allocate_memory_pools()
6342 /* Firmware maintains additional facts->HighPriorityCredit number of in _base_allocate_memory_pools()
6346 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth; in _base_allocate_memory_pools()
6349 ioc->request_sz = facts->IOCRequestFrameSize * 4; in _base_allocate_memory_pools()
6352 ioc->reply_sz = facts->ReplyFrameSize * 4; in _base_allocate_memory_pools()
6355 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { in _base_allocate_memory_pools()
6356 if (facts->IOCMaxChainSegmentSize) in _base_allocate_memory_pools()
6357 ioc->chain_segment_sz = in _base_allocate_memory_pools()
6358 facts->IOCMaxChainSegmentSize * in _base_allocate_memory_pools()
6362 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS * in _base_allocate_memory_pools()
6365 ioc->chain_segment_sz = ioc->request_sz; in _base_allocate_memory_pools()
6368 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); in _base_allocate_memory_pools()
6373 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - in _base_allocate_memory_pools()
6375 ioc->max_sges_in_main_message = max_sge_elements/sge_size; in _base_allocate_memory_pools()
6378 max_sge_elements = ioc->chain_segment_sz - sge_size; in _base_allocate_memory_pools()
6379 ioc->max_sges_in_chain_message = max_sge_elements/sge_size; in _base_allocate_memory_pools()
6384 chains_needed_per_io = ((ioc->shost->sg_tablesize - in _base_allocate_memory_pools()
6385 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) in _base_allocate_memory_pools()
6387 if (chains_needed_per_io > facts->MaxChainDepth) { in _base_allocate_memory_pools()
6388 chains_needed_per_io = facts->MaxChainDepth; in _base_allocate_memory_pools()
6389 ioc->shost->sg_tablesize = min_t(u16, in _base_allocate_memory_pools()
6390 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message in _base_allocate_memory_pools()
6391 * chains_needed_per_io), ioc->shost->sg_tablesize); in _base_allocate_memory_pools()
6393 ioc->chains_needed_per_io = chains_needed_per_io; in _base_allocate_memory_pools()
6395 /* reply free queue sizing - accounting for 64 FW events */ in _base_allocate_memory_pools()
6396 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; in _base_allocate_memory_pools()
6399 if (ioc->is_mcpu_endpoint) in _base_allocate_memory_pools()
6400 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth; in _base_allocate_memory_pools()
6403 ioc->reply_post_queue_depth = ioc->hba_queue_depth + in _base_allocate_memory_pools()
6404 ioc->reply_free_queue_depth + 1; in _base_allocate_memory_pools()
6406 if (ioc->reply_post_queue_depth % 16) in _base_allocate_memory_pools()
6407 ioc->reply_post_queue_depth += 16 - in _base_allocate_memory_pools()
6408 (ioc->reply_post_queue_depth % 16); in _base_allocate_memory_pools()
6411 if (ioc->reply_post_queue_depth > in _base_allocate_memory_pools()
6412 facts->MaxReplyDescriptorPostQueueDepth) { in _base_allocate_memory_pools()
6413 ioc->reply_post_queue_depth = in _base_allocate_memory_pools()
6414 facts->MaxReplyDescriptorPostQueueDepth - in _base_allocate_memory_pools()
6415 (facts->MaxReplyDescriptorPostQueueDepth % 16); in _base_allocate_memory_pools()
6416 ioc->hba_queue_depth = in _base_allocate_memory_pools()
6417 ((ioc->reply_post_queue_depth - 64) / 2) - 1; in _base_allocate_memory_pools()
6418 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; in _base_allocate_memory_pools()
6424 ioc->max_sges_in_main_message, in _base_allocate_memory_pools()
6425 ioc->max_sges_in_chain_message, in _base_allocate_memory_pools()
6426 ioc->shost->sg_tablesize, in _base_allocate_memory_pools()
6427 ioc->chains_needed_per_io); in _base_allocate_memory_pools()
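Two of the sizing rules computed above, in a standalone sketch with made-up numbers: how many chain segments one I/O needs for a given sg_tablesize, and how the reply post queue depth is rounded up to the next multiple of 16:

#include <stdio.h>

int main(void)
{
	int sg_tablesize = 128, sges_in_main = 19, sges_per_chain = 35;
	int chains_needed_per_io =
		(sg_tablesize - sges_in_main) / sges_per_chain + 1;

	int reply_post_queue_depth = 1098;
	if (reply_post_queue_depth % 16)
		reply_post_queue_depth += 16 -
			(reply_post_queue_depth % 16);	/* -> 1104 */

	printf("chains_needed_per_io=%d reply_post_queue_depth=%d\n",
	       chains_needed_per_io, reply_post_queue_depth);
	return 0;
}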
6430 reply_post_free_sz = ioc->reply_post_queue_depth * in _base_allocate_memory_pools()
6433 if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) in _base_allocate_memory_pools()
6434 || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK)) in _base_allocate_memory_pools()
6435 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count; in _base_allocate_memory_pools()
6437 if (ret == -EAGAIN) { in _base_allocate_memory_pools()
6443 ioc->use_32bit_dma = true; in _base_allocate_memory_pools()
6444 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { in _base_allocate_memory_pools()
6446 "32 DMA mask failed %s\n", pci_name(ioc->pdev)); in _base_allocate_memory_pools()
6447 return -ENODEV; in _base_allocate_memory_pools()
6450 return -ENOMEM; in _base_allocate_memory_pools()
6451 } else if (ret == -ENOMEM) in _base_allocate_memory_pools()
6452 return -ENOMEM; in _base_allocate_memory_pools()
6453 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 : in _base_allocate_memory_pools()
6454 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK)); in _base_allocate_memory_pools()
6455 ioc->scsiio_depth = ioc->hba_queue_depth - in _base_allocate_memory_pools()
6456 ioc->hi_priority_depth - ioc->internal_depth; in _base_allocate_memory_pools()
6461 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT; in _base_allocate_memory_pools()
6464 ioc->shost->can_queue)); in _base_allocate_memory_pools()
6469 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; in _base_allocate_memory_pools()
6470 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); in _base_allocate_memory_pools()
6472 /* hi-priority queue */ in _base_allocate_memory_pools()
6473 sz += (ioc->hi_priority_depth * ioc->request_sz); in _base_allocate_memory_pools()
6476 sz += (ioc->internal_depth * ioc->request_sz); in _base_allocate_memory_pools()
6478 ioc->request_dma_sz = sz; in _base_allocate_memory_pools()
6479 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz, in _base_allocate_memory_pools()
6480 &ioc->request_dma, GFP_KERNEL); in _base_allocate_memory_pools()
6481 if (!ioc->request) { in _base_allocate_memory_pools()
6483 ioc->hba_queue_depth, ioc->chains_needed_per_io, in _base_allocate_memory_pools()
6484 ioc->request_sz, sz / 1024); in _base_allocate_memory_pools()
6485 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) in _base_allocate_memory_pools()
6488 ioc->hba_queue_depth -= retry_sz; in _base_allocate_memory_pools()
6495 ioc->hba_queue_depth, ioc->chains_needed_per_io, in _base_allocate_memory_pools()
6496 ioc->request_sz, sz / 1024); in _base_allocate_memory_pools()
6498 /* hi-priority queue */ in _base_allocate_memory_pools()
6499 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * in _base_allocate_memory_pools()
6500 ioc->request_sz); in _base_allocate_memory_pools()
6501 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * in _base_allocate_memory_pools()
6502 ioc->request_sz); in _base_allocate_memory_pools()
6505 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * in _base_allocate_memory_pools()
6506 ioc->request_sz); in _base_allocate_memory_pools()
6507 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * in _base_allocate_memory_pools()
6508 ioc->request_sz); in _base_allocate_memory_pools()
6511 "request pool(0x%p) - dma(0x%llx): " in _base_allocate_memory_pools()
6513 ioc->request, (unsigned long long) ioc->request_dma, in _base_allocate_memory_pools()
6514 ioc->hba_queue_depth, ioc->request_sz, in _base_allocate_memory_pools()
6515 (ioc->hba_queue_depth * ioc->request_sz) / 1024); in _base_allocate_memory_pools()
6521 ioc->request, ioc->scsiio_depth)); in _base_allocate_memory_pools()
6523 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); in _base_allocate_memory_pools()
6524 sz = ioc->scsiio_depth * sizeof(struct chain_lookup); in _base_allocate_memory_pools()
6525 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL); in _base_allocate_memory_pools()
6526 if (!ioc->chain_lookup) { in _base_allocate_memory_pools()
6531 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker); in _base_allocate_memory_pools()
6532 for (i = 0; i < ioc->scsiio_depth; i++) { in _base_allocate_memory_pools()
6533 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL); in _base_allocate_memory_pools()
6534 if (!ioc->chain_lookup[i].chains_per_smid) { in _base_allocate_memory_pools()
6540 /* initialize hi-priority queue smids */ in _base_allocate_memory_pools()
6541 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, in _base_allocate_memory_pools()
6543 if (!ioc->hpr_lookup) { in _base_allocate_memory_pools()
6547 ioc->hi_priority_smid = ioc->scsiio_depth + 1; in _base_allocate_memory_pools()
6550 ioc->hi_priority, in _base_allocate_memory_pools()
6551 ioc->hi_priority_depth, ioc->hi_priority_smid)); in _base_allocate_memory_pools()
6554 ioc->internal_lookup = kcalloc(ioc->internal_depth, in _base_allocate_memory_pools()
6556 if (!ioc->internal_lookup) { in _base_allocate_memory_pools()
6560 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; in _base_allocate_memory_pools()
6563 ioc->internal, in _base_allocate_memory_pools()
6564 ioc->internal_depth, ioc->internal_smid)); in _base_allocate_memory_pools()
6566 ioc->io_queue_num = kcalloc(ioc->scsiio_depth, in _base_allocate_memory_pools()
6568 if (!ioc->io_queue_num) in _base_allocate_memory_pools()
6572 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1 in _base_allocate_memory_pools()
6573 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry in _base_allocate_memory_pools()
6584 ioc->chains_per_prp_buffer = 0; in _base_allocate_memory_pools()
6585 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { in _base_allocate_memory_pools()
6587 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1; in _base_allocate_memory_pools()
6588 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE); in _base_allocate_memory_pools()
6591 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth; in _base_allocate_memory_pools()
6592 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL); in _base_allocate_memory_pools()
6593 if (!ioc->pcie_sg_lookup) { in _base_allocate_memory_pools()
6597 sz = nvme_blocks_needed * ioc->page_size; in _base_allocate_memory_pools()
6599 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6600 return -ENOMEM; in _base_allocate_memory_pools()
6601 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6603 total_sz += sz * ioc->scsiio_depth; in _base_allocate_memory_pools()
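A sketch of the per-command NVMe PRP sizing described in the comment above: PRP entries are 8 bytes, the last entry of each page chains to the next page, and the result is rounded up to whole pages. Numbers are illustrative:

#include <stdio.h>

#define NVME_PRP_SIZE 8

int main(void)
{
	unsigned int sg_tablesize = 128, page_size = 4096;
	unsigned int prp_pages =
		((sg_tablesize * NVME_PRP_SIZE) - 1) /
		(page_size - NVME_PRP_SIZE) + 1;

	printf("PRP pages per command: %u (%u bytes)\n",
	       prp_pages, prp_pages * page_size);
	return 0;
}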
6606 rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); in _base_allocate_memory_pools()
6607 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6608 return -ENOMEM; in _base_allocate_memory_pools()
6609 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6611 total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - in _base_allocate_memory_pools()
6612 ioc->chains_per_prp_buffer) * ioc->scsiio_depth); in _base_allocate_memory_pools()
6615 ioc->chain_depth, ioc->chain_segment_sz, in _base_allocate_memory_pools()
6616 (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); in _base_allocate_memory_pools()
6618 sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; in _base_allocate_memory_pools()
6620 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6621 return -ENOMEM; in _base_allocate_memory_pools()
6622 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6626 sz = ioc->reply_free_queue_depth * ioc->reply_sz; in _base_allocate_memory_pools()
6628 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6629 return -ENOMEM; in _base_allocate_memory_pools()
6630 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6635 sz = ioc->reply_free_queue_depth * 4; in _base_allocate_memory_pools()
6637 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6638 return -ENOMEM; in _base_allocate_memory_pools()
6639 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6643 (unsigned long long)ioc->reply_free_dma)); in _base_allocate_memory_pools()
6645 if (ioc->rdpq_array_enable) { in _base_allocate_memory_pools()
6646 reply_post_free_array_sz = ioc->reply_queue_count * in _base_allocate_memory_pools()
6650 if (rc == -ENOMEM) in _base_allocate_memory_pools()
6651 return -ENOMEM; in _base_allocate_memory_pools()
6652 else if (rc == -EAGAIN) in _base_allocate_memory_pools()
6655 ioc->config_page_sz = 512; in _base_allocate_memory_pools()
6656 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, in _base_allocate_memory_pools()
6657 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL); in _base_allocate_memory_pools()
6658 if (!ioc->config_page) { in _base_allocate_memory_pools()
6663 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", in _base_allocate_memory_pools()
6664 ioc->config_page, (unsigned long long)ioc->config_page_dma, in _base_allocate_memory_pools()
6665 ioc->config_page_sz); in _base_allocate_memory_pools()
6666 total_sz += ioc->config_page_sz; in _base_allocate_memory_pools()
6671 ioc->shost->can_queue, facts->RequestCredit); in _base_allocate_memory_pools()
6673 ioc->shost->sg_tablesize); in _base_allocate_memory_pools()
6678 if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { in _base_allocate_memory_pools()
6680 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { in _base_allocate_memory_pools()
6682 pci_name(ioc->pdev)); in _base_allocate_memory_pools()
6683 return -ENODEV; in _base_allocate_memory_pools()
6686 return -ENOMEM; in _base_allocate_memory_pools()
6690 return -ENOMEM; in _base_allocate_memory_pools()
6694 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
6706 s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); in mpt3sas_base_get_iocstate()
6712 * _base_wait_on_iocstate - waiting on a particular ioc state
6717 * Return: 0 for success, non-zero for failure.
6738 } while (--cntdn); in _base_wait_on_iocstate()
6744 * _base_dump_reg_set - This function will print hexdump of register set.
6753 u32 __iomem *reg = (u32 __iomem *)ioc->chip; in _base_dump_reg_set()
6761 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
6766 * Return: 0 for success, non-zero for failure.
6768 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
6780 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); in _base_wait_for_doorbell_int()
6790 } while (--cntdn); in _base_wait_for_doorbell_int()
6794 return -EFAULT; in _base_wait_for_doorbell_int()
6806 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); in _base_spin_on_doorbell_int()
6816 } while (--cntdn); in _base_spin_on_doorbell_int()
6820 return -EFAULT; in _base_spin_on_doorbell_int()
6825 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
6829 * Return: 0 for success, non-zero for failure.
6831 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
6844 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus); in _base_wait_for_doorbell_ack()
6851 doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); in _base_wait_for_doorbell_ack()
6855 return -EFAULT; in _base_wait_for_doorbell_ack()
6860 return -EFAULT; in _base_wait_for_doorbell_ack()
6867 } while (--cntdn); in _base_wait_for_doorbell_ack()
6872 return -EFAULT; in _base_wait_for_doorbell_ack()
6876 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
6880 * Return: 0 for success, non-zero for failure.
6891 doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell); in _base_wait_for_doorbell_not_used()
6901 } while (--cntdn); in _base_wait_for_doorbell_not_used()
6905 return -EFAULT; in _base_wait_for_doorbell_not_used()
6909 * _base_send_ioc_reset - send doorbell reset
6914 * Return: 0 for success, non-zero for failure.
6925 return -EFAULT; in _base_send_ioc_reset()
6928 if (!(ioc->facts.IOCCapabilities & in _base_send_ioc_reset()
6930 return -EFAULT; in _base_send_ioc_reset()
6935 &ioc->chip->Doorbell); in _base_send_ioc_reset()
6937 r = -EFAULT; in _base_send_ioc_reset()
6945 r = -EFAULT; in _base_send_ioc_reset()
6951 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in _base_send_ioc_reset()
6957 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || in _base_send_ioc_reset()
6958 ioc->fault_reset_work_q == NULL)) { in _base_send_ioc_reset()
6960 &ioc->ioc_reset_in_progress_lock, flags); in _base_send_ioc_reset()
6965 &ioc->ioc_reset_in_progress_lock, flags); in _base_send_ioc_reset()
6967 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in _base_send_ioc_reset()
6975 * mpt3sas_wait_for_ioc - IOC's operational state is checked here.
6981 * and operational; otherwise returns %-EFAULT.
7002 if (ioc->is_driver_loading) in mpt3sas_wait_for_ioc()
7003 return -ETIME; in mpt3sas_wait_for_ioc()
7008 } while (--timeout); in mpt3sas_wait_for_ioc()
7011 return -EFAULT; in mpt3sas_wait_for_ioc()
7019 * _base_handshake_req_reply_wait - send request thru doorbell interface
7027 * Return: 0 for success, non-zero for failure.
7039 if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { in _base_handshake_req_reply_wait()
7041 return -EFAULT; in _base_handshake_req_reply_wait()
7045 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) & in _base_handshake_req_reply_wait()
7047 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7052 &ioc->chip->Doorbell); in _base_handshake_req_reply_wait()
7057 return -EFAULT; in _base_handshake_req_reply_wait()
7059 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7064 return -EFAULT; in _base_handshake_req_reply_wait()
7067 /* send message 32 bits at a time */ in _base_handshake_req_reply_wait()
7069 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); in _base_handshake_req_reply_wait()
7077 return -EFAULT; in _base_handshake_req_reply_wait()
7084 return -EFAULT; in _base_handshake_req_reply_wait()
7087 /* read the first two 16-bit words; they give the total length of the reply */ in _base_handshake_req_reply_wait()
7088 reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) in _base_handshake_req_reply_wait()
7090 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7094 return -EFAULT; in _base_handshake_req_reply_wait()
7096 reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell) in _base_handshake_req_reply_wait()
7098 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7100 for (i = 2; i < default_reply->MsgLength * 2; i++) { in _base_handshake_req_reply_wait()
7104 return -EFAULT; in _base_handshake_req_reply_wait()
7107 ioc->base_readl_ext_retry(&ioc->chip->Doorbell); in _base_handshake_req_reply_wait()
7110 ioc->base_readl_ext_retry(&ioc->chip->Doorbell) in _base_handshake_req_reply_wait()
7112 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7121 writel(0, &ioc->chip->HostInterruptStatus); in _base_handshake_req_reply_wait()
7123 if (ioc->logging_level & MPT_DEBUG_INIT) { in _base_handshake_req_reply_wait()
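A hedged sketch of how the handshake is framed: the first Doorbell write carries the MPI function code together with the request length in 32-bit dwords, after which the request is streamed one dword per write and the reply is read back 16 bits at a time (as the MsgLength * 2 loop above shows). The macro names and numeric values below are local placeholders so the sketch compiles on its own; the driver uses the MPI2_DOORBELL_* and MPI2_FUNCTION_HANDSHAKE definitions from mpi2.h, which should be checked there.

#include <stdint.h>
#include <stdio.h>

#define DOORBELL_FUNCTION_SHIFT   24	/* function code lives in bits 31:24 */
#define DOORBELL_ADD_DWORDS_SHIFT 16	/* request size (dwords) in bits 23:16 */
#define FUNCTION_HANDSHAKE        0x42	/* assumed value, check mpi2.h */

static uint32_t handshake_doorbell_word(int request_bytes)
{
	return ((uint32_t)FUNCTION_HANDSHAKE << DOORBELL_FUNCTION_SHIFT) |
	       (((uint32_t)request_bytes / 4) << DOORBELL_ADD_DWORDS_SHIFT);
}

int main(void)
{
	/* e.g. a 60-byte request is announced as 15 dwords */
	printf("0x%08x\n", (unsigned int)handshake_doorbell_word(60));
	return 0;
}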
7134 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
7139 * The SAS IO Unit Control Request message allows the host to perform low-level
7145 * Return: 0 for success, non-zero for failure.
7159 mutex_lock(&ioc->base_cmds.mutex); in mpt3sas_base_sas_iounit_control()
7161 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { in mpt3sas_base_sas_iounit_control()
7163 rc = -EAGAIN; in mpt3sas_base_sas_iounit_control()
7171 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); in mpt3sas_base_sas_iounit_control()
7174 rc = -EAGAIN; in mpt3sas_base_sas_iounit_control()
7179 ioc->base_cmds.status = MPT3_CMD_PENDING; in mpt3sas_base_sas_iounit_control()
7181 ioc->base_cmds.smid = smid; in mpt3sas_base_sas_iounit_control()
7183 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || in mpt3sas_base_sas_iounit_control()
7184 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) in mpt3sas_base_sas_iounit_control()
7185 ioc->ioc_link_reset_in_progress = 1; in mpt3sas_base_sas_iounit_control()
7186 init_completion(&ioc->base_cmds.done); in mpt3sas_base_sas_iounit_control()
7187 ioc->put_smid_default(ioc, smid); in mpt3sas_base_sas_iounit_control()
7188 wait_for_completion_timeout(&ioc->base_cmds.done, in mpt3sas_base_sas_iounit_control()
7190 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || in mpt3sas_base_sas_iounit_control()
7191 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && in mpt3sas_base_sas_iounit_control()
7192 ioc->ioc_link_reset_in_progress) in mpt3sas_base_sas_iounit_control()
7193 ioc->ioc_link_reset_in_progress = 0; in mpt3sas_base_sas_iounit_control()
7194 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { in mpt3sas_base_sas_iounit_control()
7195 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, in mpt3sas_base_sas_iounit_control()
7200 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) in mpt3sas_base_sas_iounit_control()
7201 memcpy(mpi_reply, ioc->base_cmds.reply, in mpt3sas_base_sas_iounit_control()
7205 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_sas_iounit_control()
7211 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_sas_iounit_control()
7212 rc = -EFAULT; in mpt3sas_base_sas_iounit_control()
7214 mutex_unlock(&ioc->base_cmds.mutex); in mpt3sas_base_sas_iounit_control()
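A stand-alone sketch of the base_cmds status lifecycle that mpt3sas_base_sas_iounit_control() walks through above: claim the slot only when it is NOT_USED, mark it PENDING while the request is outstanding, let the completion path add COMPLETE (and REPLY_VALID), and return it to NOT_USED when done. The flag values are illustrative, not the driver's MPT3_CMD_* definitions.

#include <stdio.h>

enum {
	CMD_NOT_USED    = 0x8000,	/* illustrative values only */
	CMD_COMPLETE    = 0x0001,
	CMD_PENDING     = 0x0002,
	CMD_REPLY_VALID = 0x0004,
};

int main(void)
{
	unsigned int status = CMD_NOT_USED;

	if (status != CMD_NOT_USED)
		return 1;				/* slot busy: bail out (-EAGAIN) */
	status = CMD_PENDING;				/* request handed to firmware */

	status &= ~CMD_PENDING;				/* completion callback ran */
	status |= CMD_COMPLETE | CMD_REPLY_VALID;

	if (!(status & CMD_COMPLETE))
		return 1;				/* would be handled as a timeout */
	status = CMD_NOT_USED;				/* slot released for the next caller */
	printf("final status 0x%04x\n", status);
	return 0;
}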
7219 * mpt3sas_base_scsi_enclosure_processor - send a request to the SEP device
7227 * Return: 0 for success, non-zero for failure.
7240 mutex_lock(&ioc->base_cmds.mutex); in mpt3sas_base_scsi_enclosure_processor()
7242 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { in mpt3sas_base_scsi_enclosure_processor()
7244 rc = -EAGAIN; in mpt3sas_base_scsi_enclosure_processor()
7252 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); in mpt3sas_base_scsi_enclosure_processor()
7255 rc = -EAGAIN; in mpt3sas_base_scsi_enclosure_processor()
7260 ioc->base_cmds.status = MPT3_CMD_PENDING; in mpt3sas_base_scsi_enclosure_processor()
7262 ioc->base_cmds.smid = smid; in mpt3sas_base_scsi_enclosure_processor()
7263 memset(request, 0, ioc->request_sz); in mpt3sas_base_scsi_enclosure_processor()
7265 init_completion(&ioc->base_cmds.done); in mpt3sas_base_scsi_enclosure_processor()
7266 ioc->put_smid_default(ioc, smid); in mpt3sas_base_scsi_enclosure_processor()
7267 wait_for_completion_timeout(&ioc->base_cmds.done, in mpt3sas_base_scsi_enclosure_processor()
7269 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { in mpt3sas_base_scsi_enclosure_processor()
7271 ioc->base_cmds.status, mpi_request, in mpt3sas_base_scsi_enclosure_processor()
7275 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) in mpt3sas_base_scsi_enclosure_processor()
7276 memcpy(mpi_reply, ioc->base_cmds.reply, in mpt3sas_base_scsi_enclosure_processor()
7280 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_scsi_enclosure_processor()
7286 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_scsi_enclosure_processor()
7287 rc = -EFAULT; in mpt3sas_base_scsi_enclosure_processor()
7289 mutex_unlock(&ioc->base_cmds.mutex); in mpt3sas_base_scsi_enclosure_processor()
7294 * _base_get_port_facts - obtain port facts reply and save in ioc
7298 * Return: 0 for success, non-zero for failure.
7323 pfacts = &ioc->pfacts[port]; in _base_get_port_facts()
7325 pfacts->PortNumber = mpi_reply.PortNumber; in _base_get_port_facts()
7326 pfacts->VP_ID = mpi_reply.VP_ID; in _base_get_port_facts()
7327 pfacts->VF_ID = mpi_reply.VF_ID; in _base_get_port_facts()
7328 pfacts->MaxPostedCmdBuffers = in _base_get_port_facts()
7335 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
7339 * Return: 0 for success, non-zero for failure.
7349 if (ioc->pci_error_recovery) { in _base_wait_for_iocstate()
7353 return -EFAULT; in _base_wait_for_iocstate()
7379 return -EFAULT; in _base_wait_for_iocstate()
7387 return -EFAULT; in _base_wait_for_iocstate()
7398 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7401 * Return: 0 for success, non-zero for failure.
7432 facts = &ioc->facts; in _base_get_ioc_facts()
7434 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); in _base_get_ioc_facts()
7435 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); in _base_get_ioc_facts()
7436 facts->VP_ID = mpi_reply.VP_ID; in _base_get_ioc_facts()
7437 facts->VF_ID = mpi_reply.VF_ID; in _base_get_ioc_facts()
7438 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); in _base_get_ioc_facts()
7439 facts->MaxChainDepth = mpi_reply.MaxChainDepth; in _base_get_ioc_facts()
7440 facts->WhoInit = mpi_reply.WhoInit; in _base_get_ioc_facts()
7441 facts->NumberOfPorts = mpi_reply.NumberOfPorts; in _base_get_ioc_facts()
7442 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; in _base_get_ioc_facts()
7443 if (ioc->msix_enable && (facts->MaxMSIxVectors <= in _base_get_ioc_facts()
7444 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) in _base_get_ioc_facts()
7445 ioc->combined_reply_queue = 0; in _base_get_ioc_facts()
7446 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); in _base_get_ioc_facts()
7447 facts->MaxReplyDescriptorPostQueueDepth = in _base_get_ioc_facts()
7449 facts->ProductID = le16_to_cpu(mpi_reply.ProductID); in _base_get_ioc_facts()
7450 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); in _base_get_ioc_facts()
7451 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) in _base_get_ioc_facts()
7452 ioc->ir_firmware = 1; in _base_get_ioc_facts()
7453 if ((facts->IOCCapabilities & in _base_get_ioc_facts()
7455 ioc->rdpq_array_capable = 1; in _base_get_ioc_facts()
7456 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) in _base_get_ioc_facts()
7457 && ioc->is_aero_ioc) in _base_get_ioc_facts()
7458 ioc->atomic_desc_capable = 1; in _base_get_ioc_facts()
7459 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); in _base_get_ioc_facts()
7460 facts->IOCRequestFrameSize = in _base_get_ioc_facts()
7462 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { in _base_get_ioc_facts()
7463 facts->IOCMaxChainSegmentSize = in _base_get_ioc_facts()
7466 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); in _base_get_ioc_facts()
7467 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); in _base_get_ioc_facts()
7468 ioc->shost->max_id = -1; in _base_get_ioc_facts()
7469 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); in _base_get_ioc_facts()
7470 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); in _base_get_ioc_facts()
7471 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); in _base_get_ioc_facts()
7472 facts->HighPriorityCredit = in _base_get_ioc_facts()
7474 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; in _base_get_ioc_facts()
7475 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); in _base_get_ioc_facts()
7476 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; in _base_get_ioc_facts()
7481 ioc->page_size = 1 << facts->CurrentHostPageSize; in _base_get_ioc_facts()
7482 if (ioc->page_size == 1) { in _base_get_ioc_facts()
7484 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K; in _base_get_ioc_facts()
7488 facts->CurrentHostPageSize)); in _base_get_ioc_facts()
7492 facts->RequestCredit, facts->MaxChainDepth)); in _base_get_ioc_facts()
7495 facts->IOCRequestFrameSize * 4, in _base_get_ioc_facts()
7496 facts->ReplyFrameSize * 4)); in _base_get_ioc_facts()
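A small sketch of the CurrentHostPageSize handling above: the IOC reports the host page size as a power-of-two exponent, and an exponent of zero (page_size == 1) makes the driver fall back to 4 KiB. The 4 KiB exponent of 12 is assumed here so the sketch stands alone; the driver uses MPT3SAS_HOST_PAGE_SIZE_4K.

#include <stdio.h>

#define HOST_PAGE_SIZE_4K_EXP 12	/* assumed value of MPT3SAS_HOST_PAGE_SIZE_4K */

static unsigned int host_page_size(unsigned char current_host_page_size)
{
	unsigned int page_size = 1u << current_host_page_size;

	if (page_size == 1)			/* exponent 0: IOC did not report one */
		page_size = 1u << HOST_PAGE_SIZE_4K_EXP;
	return page_size;
}

int main(void)
{
	printf("%u %u\n", host_page_size(0), host_page_size(13));	/* 4096 8192 */
	return 0;
}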
7501 * _base_send_ioc_init - send ioc_init to firmware
7504 * Return: 0 for success, non-zero for failure.
7523 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); in _base_send_ioc_init()
7528 mpi_request.HostMSIxVectors = ioc->reply_queue_count; in _base_send_ioc_init()
7529 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); in _base_send_ioc_init()
7531 cpu_to_le16(ioc->reply_post_queue_depth); in _base_send_ioc_init()
7533 cpu_to_le16(ioc->reply_free_queue_depth); in _base_send_ioc_init()
7536 cpu_to_le32((u64)ioc->sense_dma >> 32); in _base_send_ioc_init()
7538 cpu_to_le32((u64)ioc->reply_dma >> 32); in _base_send_ioc_init()
7540 cpu_to_le64((u64)ioc->request_dma); in _base_send_ioc_init()
7542 cpu_to_le64((u64)ioc->reply_free_dma); in _base_send_ioc_init()
7544 if (ioc->rdpq_array_enable) { in _base_send_ioc_init()
7545 reply_post_free_array_sz = ioc->reply_queue_count * in _base_send_ioc_init()
7547 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz); in _base_send_ioc_init()
7548 for (i = 0; i < ioc->reply_queue_count; i++) in _base_send_ioc_init()
7549 ioc->reply_post_free_array[i].RDPQBaseAddress = in _base_send_ioc_init()
7551 (u64)ioc->reply_post[i].reply_post_free_dma); in _base_send_ioc_init()
7554 cpu_to_le64((u64)ioc->reply_post_free_array_dma); in _base_send_ioc_init()
7557 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); in _base_send_ioc_init()
7572 if (ioc->logging_level & MPT_DEBUG_INIT) { in _base_send_ioc_init()
7596 r = -EIO; in _base_send_ioc_init()
7600 ioc->timestamp_update_count = 0; in _base_send_ioc_init()
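A sketch of the address split visible above: IOCInit carries some DMA addresses as full 64-bit fields and others (for example the sense and reply pools) with only the upper 32 bits in a *High field, so the driver shifts the bus address right by 32. The helper below only demonstrates the split; it is not part of the driver.

#include <stdint.h>
#include <stdio.h>

static void split_dma_addr(uint64_t dma, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(dma >> 32);		/* goes into the *High field */
	*lo = (uint32_t)(dma & 0xffffffffu);
}

int main(void)
{
	uint32_t hi, lo;

	split_dma_addr(0x0000001234abcd00ULL, &hi, &lo);
	printf("hi=0x%08x lo=0x%08x\n", (unsigned int)hi, (unsigned int)lo);
	return 0;
}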
7605 * mpt3sas_port_enable_done - command completion routine for port enable
7621 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) in mpt3sas_port_enable_done()
7628 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) in mpt3sas_port_enable_done()
7631 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; in mpt3sas_port_enable_done()
7632 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; in mpt3sas_port_enable_done()
7633 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; in mpt3sas_port_enable_done()
7634 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); in mpt3sas_port_enable_done()
7635 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; in mpt3sas_port_enable_done()
7637 ioc->port_enable_failed = 1; in mpt3sas_port_enable_done()
7639 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) { in mpt3sas_port_enable_done()
7640 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC; in mpt3sas_port_enable_done()
7645 ioc->start_scan_failed = ioc_status; in mpt3sas_port_enable_done()
7646 ioc->start_scan = 0; in mpt3sas_port_enable_done()
7650 complete(&ioc->port_enable_cmds.done); in mpt3sas_port_enable_done()
7655 * _base_send_port_enable - send port_enable (start device discovery) to firmware
7658 * Return: 0 for success, non-zero for failure.
7671 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { in _base_send_port_enable()
7673 return -EAGAIN; in _base_send_port_enable()
7676 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); in _base_send_port_enable()
7679 return -EAGAIN; in _base_send_port_enable()
7682 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; in _base_send_port_enable()
7684 ioc->port_enable_cmds.smid = smid; in _base_send_port_enable()
7686 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; in _base_send_port_enable()
7688 init_completion(&ioc->port_enable_cmds.done); in _base_send_port_enable()
7689 ioc->put_smid_default(ioc, smid); in _base_send_port_enable()
7690 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ); in _base_send_port_enable()
7691 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { in _base_send_port_enable()
7695 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) in _base_send_port_enable()
7696 r = -EFAULT; in _base_send_port_enable()
7698 r = -ETIME; in _base_send_port_enable()
7702 mpi_reply = ioc->port_enable_cmds.reply; in _base_send_port_enable()
7703 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; in _base_send_port_enable()
7707 r = -EFAULT; in _base_send_port_enable()
7712 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; in _base_send_port_enable()
7718 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7721 * Return: 0 for success, non-zero for failure.
7731 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { in mpt3sas_port_enable()
7733 return -EAGAIN; in mpt3sas_port_enable()
7736 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); in mpt3sas_port_enable()
7739 return -EAGAIN; in mpt3sas_port_enable()
7741 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED; in mpt3sas_port_enable()
7742 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; in mpt3sas_port_enable()
7743 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC; in mpt3sas_port_enable()
7745 ioc->port_enable_cmds.smid = smid; in mpt3sas_port_enable()
7747 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; in mpt3sas_port_enable()
7749 ioc->put_smid_default(ioc, smid); in mpt3sas_port_enable()
7754 * _base_determine_wait_on_discovery - disposition
7767 * turn on the bit in ioc->pd_handles to indicate PD in _base_determine_wait_on_discovery()
7771 if (ioc->ir_firmware) in _base_determine_wait_on_discovery()
7775 if (!ioc->bios_pg3.BiosVersion) in _base_determine_wait_on_discovery()
7785 if ((ioc->bios_pg2.CurrentBootDeviceForm & in _base_determine_wait_on_discovery()
7789 (ioc->bios_pg2.ReqBootDeviceForm & in _base_determine_wait_on_discovery()
7793 (ioc->bios_pg2.ReqAltBootDeviceForm & in _base_determine_wait_on_discovery()
7802 * _base_unmask_events - turn on notification for this event
7806 * The mask is stored in ioc->event_masks.
7819 ioc->event_masks[0] &= ~desired_event; in _base_unmask_events()
7821 ioc->event_masks[1] &= ~desired_event; in _base_unmask_events()
7823 ioc->event_masks[2] &= ~desired_event; in _base_unmask_events()
7825 ioc->event_masks[3] &= ~desired_event; in _base_unmask_events()
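A stand-alone sketch of the event mask layout used by _base_unmask_events() above: four 32-bit words cover event codes 0..127, a set bit means the event is masked (firmware will not deliver it), and unmasking clears bit (event % 32) in word (event / 32). The event code in the example is arbitrary.

#include <stdint.h>
#include <stdio.h>

static void unmask_event(uint32_t masks[4], unsigned int event)
{
	if (event >= 128)
		return;					/* only 128 event codes exist */
	masks[event / 32] &= ~(1u << (event % 32));	/* cleared bit = deliver this event */
}

int main(void)
{
	uint32_t masks[4] = { ~0u, ~0u, ~0u, ~0u };	/* start with everything masked */

	unmask_event(masks, 37);			/* arbitrary event code */
	printf("word1=0x%08x\n", (unsigned int)masks[1]);	/* bit 5 cleared */
	return 0;
}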
7829 * _base_event_notification - send event notification
7832 * Return: 0 for success, non-zero for failure.
7844 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { in _base_event_notification()
7846 return -EAGAIN; in _base_event_notification()
7849 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); in _base_event_notification()
7852 return -EAGAIN; in _base_event_notification()
7854 ioc->base_cmds.status = MPT3_CMD_PENDING; in _base_event_notification()
7856 ioc->base_cmds.smid = smid; in _base_event_notification()
7858 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; in _base_event_notification()
7859 mpi_request->VF_ID = 0; /* TODO */ in _base_event_notification()
7860 mpi_request->VP_ID = 0; in _base_event_notification()
7862 mpi_request->EventMasks[i] = in _base_event_notification()
7863 cpu_to_le32(ioc->event_masks[i]); in _base_event_notification()
7864 init_completion(&ioc->base_cmds.done); in _base_event_notification()
7865 ioc->put_smid_default(ioc, smid); in _base_event_notification()
7866 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); in _base_event_notification()
7867 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { in _base_event_notification()
7871 if (ioc->base_cmds.status & MPT3_CMD_RESET) in _base_event_notification()
7872 r = -EFAULT; in _base_event_notification()
7878 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in _base_event_notification()
7881 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED) in _base_event_notification()
7882 return -EFAULT; in _base_event_notification()
7884 return -EFAULT; in _base_event_notification()
7885 r = -EAGAIN; in _base_event_notification()
7891 * mpt3sas_base_validate_event_type - validate event types
7911 (ioc->event_masks[i] & desired_event)) { in mpt3sas_base_validate_event_type()
7912 ioc->event_masks[i] &= ~desired_event; in mpt3sas_base_validate_event_type()
7922 mutex_lock(&ioc->base_cmds.mutex); in mpt3sas_base_validate_event_type()
7924 mutex_unlock(&ioc->base_cmds.mutex); in mpt3sas_base_validate_event_type()
7928 * _base_diag_reset - the "big hammer" start of day reset
7931 * Return: 0 for success, non-zero for failure.
7943 pci_cfg_access_lock(ioc->pdev); in _base_diag_reset()
7953 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7954 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7955 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7956 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7957 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7958 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7959 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
7971 host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); in _base_diag_reset()
7978 hcb_size = ioc->base_readl(&ioc->chip->HCBSize); in _base_diag_reset()
7982 &ioc->chip->HostDiagnostic); in _base_diag_reset()
7991 host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic); in _base_diag_reset()
8011 writel(host_diagnostic, &ioc->chip->HostDiagnostic); in _base_diag_reset()
8013 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n")); in _base_diag_reset()
8015 &ioc->chip->HCBSize); in _base_diag_reset()
8020 &ioc->chip->HostDiagnostic); in _base_diag_reset()
8024 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); in _base_diag_reset()
8035 pci_cfg_access_unlock(ioc->pdev); in _base_diag_reset()
8040 pci_cfg_access_unlock(ioc->pdev); in _base_diag_reset()
8042 return -EFAULT; in _base_diag_reset()
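A sketch of the diagnostic write-sequence unlock visible above: the host writes the flush key followed by six fixed keys to the WriteSequence register to unlock the diagnostic registers, and writes the flush key again to re-lock them afterwards. The values below are placeholders so the sketch compiles; the real keys are the MPI2_WRSEQ_*_KEY_VALUE constants in mpi2.h.

#include <stdint.h>
#include <stdio.h>

static const uint32_t wrseq_unlock[] = {
	0x0,	/* MPI2_WRSEQ_FLUSH_KEY_VALUE (placeholder value) */
	0x1,	/* MPI2_WRSEQ_1ST_KEY_VALUE   (placeholder value) */
	0x2,	/* MPI2_WRSEQ_2ND_KEY_VALUE   (placeholder value) */
	0x3,	/* MPI2_WRSEQ_3RD_KEY_VALUE   (placeholder value) */
	0x4,	/* MPI2_WRSEQ_4TH_KEY_VALUE   (placeholder value) */
	0x5,	/* MPI2_WRSEQ_5TH_KEY_VALUE   (placeholder value) */
	0x6,	/* MPI2_WRSEQ_6TH_KEY_VALUE   (placeholder value) */
};

int main(void)
{
	int i;

	for (i = 0; i < (int)(sizeof(wrseq_unlock) / sizeof(wrseq_unlock[0])); i++)
		printf("write 0x%x to WriteSequence\n", (unsigned int)wrseq_unlock[i]);
	return 0;
}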
8046 * mpt3sas_base_make_ioc_ready - put controller in READY state
8050 * Return: 0 for success, non-zero for failure.
8061 if (ioc->pci_error_recovery) in mpt3sas_base_make_ioc_ready()
8077 return -EFAULT; in mpt3sas_base_make_ioc_ready()
8106 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { in mpt3sas_base_make_ioc_ready()
8130 * _base_make_ioc_operational - put controller in OPERATIONAL state
8133 * Return: 0 for success, non-zero for failure.
8153 &ioc->delayed_tr_list, list) { in _base_make_ioc_operational()
8154 list_del(&delayed_tr->list); in _base_make_ioc_operational()
8160 &ioc->delayed_tr_volume_list, list) { in _base_make_ioc_operational()
8161 list_del(&delayed_tr->list); in _base_make_ioc_operational()
8166 &ioc->delayed_sc_list, list) { in _base_make_ioc_operational()
8167 list_del(&delayed_sc->list); in _base_make_ioc_operational()
8172 &ioc->delayed_event_ack_list, list) { in _base_make_ioc_operational()
8173 list_del(&delayed_event_ack->list); in _base_make_ioc_operational()
8177 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); in _base_make_ioc_operational()
8179 /* hi-priority queue */ in _base_make_ioc_operational()
8180 INIT_LIST_HEAD(&ioc->hpr_free_list); in _base_make_ioc_operational()
8181 smid = ioc->hi_priority_smid; in _base_make_ioc_operational()
8182 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { in _base_make_ioc_operational()
8183 ioc->hpr_lookup[i].cb_idx = 0xFF; in _base_make_ioc_operational()
8184 ioc->hpr_lookup[i].smid = smid; in _base_make_ioc_operational()
8185 list_add_tail(&ioc->hpr_lookup[i].tracker_list, in _base_make_ioc_operational()
8186 &ioc->hpr_free_list); in _base_make_ioc_operational()
8190 INIT_LIST_HEAD(&ioc->internal_free_list); in _base_make_ioc_operational()
8191 smid = ioc->internal_smid; in _base_make_ioc_operational()
8192 for (i = 0; i < ioc->internal_depth; i++, smid++) { in _base_make_ioc_operational()
8193 ioc->internal_lookup[i].cb_idx = 0xFF; in _base_make_ioc_operational()
8194 ioc->internal_lookup[i].smid = smid; in _base_make_ioc_operational()
8195 list_add_tail(&ioc->internal_lookup[i].tracker_list, in _base_make_ioc_operational()
8196 &ioc->internal_free_list); in _base_make_ioc_operational()
8199 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); in _base_make_ioc_operational()
8202 for (i = 0, reply_address = (u32)ioc->reply_dma ; in _base_make_ioc_operational()
8203 i < ioc->reply_free_queue_depth ; i++, reply_address += in _base_make_ioc_operational()
8204 ioc->reply_sz) { in _base_make_ioc_operational()
8205 ioc->reply_free[i] = cpu_to_le32(reply_address); in _base_make_ioc_operational()
8206 if (ioc->is_mcpu_endpoint) in _base_make_ioc_operational()
8212 if (ioc->is_driver_loading) in _base_make_ioc_operational()
8217 reply_post_free_contig = ioc->reply_post[0].reply_post_free; in _base_make_ioc_operational()
8218 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in _base_make_ioc_operational()
8223 if (ioc->rdpq_array_enable) { in _base_make_ioc_operational()
8224 reply_q->reply_post_free = in _base_make_ioc_operational()
8225 ioc->reply_post[index++].reply_post_free; in _base_make_ioc_operational()
8227 reply_q->reply_post_free = reply_post_free_contig; in _base_make_ioc_operational()
8228 reply_post_free_contig += ioc->reply_post_queue_depth; in _base_make_ioc_operational()
8231 reply_q->reply_post_host_index = 0; in _base_make_ioc_operational()
8232 for (i = 0; i < ioc->reply_post_queue_depth; i++) in _base_make_ioc_operational()
8233 reply_q->reply_post_free[i].Words = in _base_make_ioc_operational()
8247 if (!ioc->is_driver_loading) in _base_make_ioc_operational()
8256 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; in _base_make_ioc_operational()
8257 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); in _base_make_ioc_operational()
8260 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { in _base_make_ioc_operational()
8261 if (ioc->combined_reply_queue) in _base_make_ioc_operational()
8262 writel((reply_q->msix_index & 7)<< in _base_make_ioc_operational()
8264 ioc->replyPostRegisterIndex[reply_q->msix_index/8]); in _base_make_ioc_operational()
8266 writel(reply_q->msix_index << in _base_make_ioc_operational()
8268 &ioc->chip->ReplyPostHostIndex); in _base_make_ioc_operational()
8278 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { in _base_make_ioc_operational()
8292 if (!ioc->shost_recovery) { in _base_make_ioc_operational()
8294 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier in _base_make_ioc_operational()
8297 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & in _base_make_ioc_operational()
8300 ioc->mfg_pg10_hide_flag = hide_flag; in _base_make_ioc_operational()
8303 ioc->wait_for_discovery_to_complete = in _base_make_ioc_operational()
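A stand-alone sketch of the reply free queue setup done above: each queue entry receives the 32-bit DMA address of one reply frame carved out of a contiguous pool (reply_dma advancing by reply_sz), and ReplyFreeHostIndex is parked at depth - 1 so that every frame starts out available to the firmware. The pool address and sizes in the example are arbitrary.

#include <stdint.h>
#include <stdio.h>

static uint32_t init_reply_free(uint32_t *reply_free, int depth,
				uint32_t pool_dma, int reply_sz)
{
	uint32_t addr = pool_dma;
	int i;

	for (i = 0; i < depth; i++, addr += reply_sz)
		reply_free[i] = addr;		/* the driver stores this little-endian */
	return depth - 1;			/* value written to ReplyFreeHostIndex */
}

int main(void)
{
	uint32_t q[4];
	uint32_t idx = init_reply_free(q, 4, 0x80000000u, 128);

	printf("last=0x%08x host_index=%u\n", (unsigned int)q[3], (unsigned int)idx);	/* 0x80000180, 3 */
	return 0;
}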
8317 * mpt3sas_base_free_resources - free controller resources
8326 mutex_lock(&ioc->pci_access_mutex); in mpt3sas_base_free_resources()
8327 if (ioc->chip_phys && ioc->chip) { in mpt3sas_base_free_resources()
8329 ioc->shost_recovery = 1; in mpt3sas_base_free_resources()
8331 ioc->shost_recovery = 0; in mpt3sas_base_free_resources()
8335 mutex_unlock(&ioc->pci_access_mutex); in mpt3sas_base_free_resources()
8340 * mpt3sas_base_attach - attach controller instance
8343 * Return: 0 for success, non-zero for failure.
8354 ioc->cpu_count = num_online_cpus(); in mpt3sas_base_attach()
8357 ioc->cpu_msix_table_sz = last_cpu_id + 1; in mpt3sas_base_attach()
8358 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); in mpt3sas_base_attach()
8359 ioc->reply_queue_count = 1; in mpt3sas_base_attach()
8360 if (!ioc->cpu_msix_table) { in mpt3sas_base_attach()
8362 r = -ENOMEM; in mpt3sas_base_attach()
8366 if (ioc->is_warpdrive) { in mpt3sas_base_attach()
8367 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, in mpt3sas_base_attach()
8369 if (!ioc->reply_post_host_index) { in mpt3sas_base_attach()
8371 r = -ENOMEM; in mpt3sas_base_attach()
8376 ioc->smp_affinity_enable = smp_affinity_enable; in mpt3sas_base_attach()
8378 ioc->rdpq_array_enable_assigned = 0; in mpt3sas_base_attach()
8379 ioc->use_32bit_dma = false; in mpt3sas_base_attach()
8380 ioc->dma_mask = 64; in mpt3sas_base_attach()
8381 if (ioc->is_aero_ioc) { in mpt3sas_base_attach()
8382 ioc->base_readl = &_base_readl_aero; in mpt3sas_base_attach()
8383 ioc->base_readl_ext_retry = &_base_readl_ext_retry; in mpt3sas_base_attach()
8385 ioc->base_readl = &_base_readl; in mpt3sas_base_attach()
8386 ioc->base_readl_ext_retry = &_base_readl; in mpt3sas_base_attach()
8392 pci_set_drvdata(ioc->pdev, ioc->shost); in mpt3sas_base_attach()
8400 switch (ioc->hba_mpi_version_belonged) { in mpt3sas_base_attach()
8402 ioc->build_sg_scmd = &_base_build_sg_scmd; in mpt3sas_base_attach()
8403 ioc->build_sg = &_base_build_sg; in mpt3sas_base_attach()
8404 ioc->build_zero_len_sge = &_base_build_zero_len_sge; in mpt3sas_base_attach()
8405 ioc->get_msix_index_for_smlio = &_base_get_msix_index; in mpt3sas_base_attach()
8412 * Target Status - all require the IEEE formatted scatter gather in mpt3sas_base_attach()
8415 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; in mpt3sas_base_attach()
8416 ioc->build_sg = &_base_build_sg_ieee; in mpt3sas_base_attach()
8417 ioc->build_nvme_prp = &_base_build_nvme_prp; in mpt3sas_base_attach()
8418 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; in mpt3sas_base_attach()
8419 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); in mpt3sas_base_attach()
8420 if (ioc->high_iops_queues) in mpt3sas_base_attach()
8421 ioc->get_msix_index_for_smlio = in mpt3sas_base_attach()
8424 ioc->get_msix_index_for_smlio = &_base_get_msix_index; in mpt3sas_base_attach()
8427 if (ioc->atomic_desc_capable) { in mpt3sas_base_attach()
8428 ioc->put_smid_default = &_base_put_smid_default_atomic; in mpt3sas_base_attach()
8429 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic; in mpt3sas_base_attach()
8430 ioc->put_smid_fast_path = in mpt3sas_base_attach()
8432 ioc->put_smid_hi_priority = in mpt3sas_base_attach()
8435 ioc->put_smid_default = &_base_put_smid_default; in mpt3sas_base_attach()
8436 ioc->put_smid_fast_path = &_base_put_smid_fast_path; in mpt3sas_base_attach()
8437 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority; in mpt3sas_base_attach()
8438 if (ioc->is_mcpu_endpoint) in mpt3sas_base_attach()
8439 ioc->put_smid_scsi_io = in mpt3sas_base_attach()
8442 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io; in mpt3sas_base_attach()
8450 ioc->build_sg_mpi = &_base_build_sg; in mpt3sas_base_attach()
8451 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; in mpt3sas_base_attach()
8457 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, in mpt3sas_base_attach()
8459 if (!ioc->pfacts) { in mpt3sas_base_attach()
8460 r = -ENOMEM; in mpt3sas_base_attach()
8464 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { in mpt3sas_base_attach()
8478 ioc->thresh_hold = irqpoll_weight; in mpt3sas_base_attach()
8480 ioc->thresh_hold = ioc->hba_queue_depth/4; in mpt3sas_base_attach()
8483 init_waitqueue_head(&ioc->reset_wq); in mpt3sas_base_attach()
8486 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); in mpt3sas_base_attach()
8487 if (ioc->facts.MaxDevHandle % 8) in mpt3sas_base_attach()
8488 ioc->pd_handles_sz++; in mpt3sas_base_attach()
8489 ioc->pd_handles = kzalloc(ioc->pd_handles_sz, in mpt3sas_base_attach()
8491 if (!ioc->pd_handles) { in mpt3sas_base_attach()
8492 r = -ENOMEM; in mpt3sas_base_attach()
8495 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, in mpt3sas_base_attach()
8497 if (!ioc->blocking_handles) { in mpt3sas_base_attach()
8498 r = -ENOMEM; in mpt3sas_base_attach()
8503 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); in mpt3sas_base_attach()
8504 if (ioc->facts.MaxDevHandle % 8) in mpt3sas_base_attach()
8505 ioc->pend_os_device_add_sz++; in mpt3sas_base_attach()
8506 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, in mpt3sas_base_attach()
8508 if (!ioc->pend_os_device_add) { in mpt3sas_base_attach()
8509 r = -ENOMEM; in mpt3sas_base_attach()
8513 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; in mpt3sas_base_attach()
8514 ioc->device_remove_in_progress = in mpt3sas_base_attach()
8515 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); in mpt3sas_base_attach()
8516 if (!ioc->device_remove_in_progress) { in mpt3sas_base_attach()
8517 r = -ENOMEM; in mpt3sas_base_attach()
8521 ioc->fwfault_debug = mpt3sas_fwfault_debug; in mpt3sas_base_attach()
8524 mutex_init(&ioc->base_cmds.mutex); in mpt3sas_base_attach()
8525 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8526 ioc->base_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8529 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8530 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8533 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8534 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8535 mutex_init(&ioc->transport_cmds.mutex); in mpt3sas_base_attach()
8538 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8539 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8540 mutex_init(&ioc->scsih_cmds.mutex); in mpt3sas_base_attach()
8543 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8544 ioc->tm_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8545 mutex_init(&ioc->tm_cmds.mutex); in mpt3sas_base_attach()
8548 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8549 ioc->config_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8550 mutex_init(&ioc->config_cmds.mutex); in mpt3sas_base_attach()
8553 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); in mpt3sas_base_attach()
8554 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); in mpt3sas_base_attach()
8555 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; in mpt3sas_base_attach()
8556 mutex_init(&ioc->ctl_cmds.mutex); in mpt3sas_base_attach()
8558 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || in mpt3sas_base_attach()
8559 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || in mpt3sas_base_attach()
8560 !ioc->tm_cmds.reply || !ioc->config_cmds.reply || in mpt3sas_base_attach()
8561 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { in mpt3sas_base_attach()
8562 r = -ENOMEM; in mpt3sas_base_attach()
8567 ioc->event_masks[i] = -1; in mpt3sas_base_attach()
8583 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) { in mpt3sas_base_attach()
8584 if (ioc->is_gen35_ioc) { in mpt3sas_base_attach()
8593 if (r == -EAGAIN) { in mpt3sas_base_attach()
8603 memcpy(&ioc->prev_fw_facts, &ioc->facts, in mpt3sas_base_attach()
8606 ioc->non_operational_loop = 0; in mpt3sas_base_attach()
8607 ioc->ioc_coredump_loop = 0; in mpt3sas_base_attach()
8608 ioc->got_task_abort_from_ioctl = 0; in mpt3sas_base_attach()
8613 ioc->remove_host = 1; in mpt3sas_base_attach()
8617 pci_set_drvdata(ioc->pdev, NULL); in mpt3sas_base_attach()
8618 kfree(ioc->cpu_msix_table); in mpt3sas_base_attach()
8619 if (ioc->is_warpdrive) in mpt3sas_base_attach()
8620 kfree(ioc->reply_post_host_index); in mpt3sas_base_attach()
8621 kfree(ioc->pd_handles); in mpt3sas_base_attach()
8622 kfree(ioc->blocking_handles); in mpt3sas_base_attach()
8623 kfree(ioc->device_remove_in_progress); in mpt3sas_base_attach()
8624 kfree(ioc->pend_os_device_add); in mpt3sas_base_attach()
8625 kfree(ioc->tm_cmds.reply); in mpt3sas_base_attach()
8626 kfree(ioc->transport_cmds.reply); in mpt3sas_base_attach()
8627 kfree(ioc->scsih_cmds.reply); in mpt3sas_base_attach()
8628 kfree(ioc->config_cmds.reply); in mpt3sas_base_attach()
8629 kfree(ioc->base_cmds.reply); in mpt3sas_base_attach()
8630 kfree(ioc->port_enable_cmds.reply); in mpt3sas_base_attach()
8631 kfree(ioc->ctl_cmds.reply); in mpt3sas_base_attach()
8632 kfree(ioc->ctl_cmds.sense); in mpt3sas_base_attach()
8633 kfree(ioc->pfacts); in mpt3sas_base_attach()
8634 ioc->ctl_cmds.reply = NULL; in mpt3sas_base_attach()
8635 ioc->base_cmds.reply = NULL; in mpt3sas_base_attach()
8636 ioc->tm_cmds.reply = NULL; in mpt3sas_base_attach()
8637 ioc->scsih_cmds.reply = NULL; in mpt3sas_base_attach()
8638 ioc->transport_cmds.reply = NULL; in mpt3sas_base_attach()
8639 ioc->config_cmds.reply = NULL; in mpt3sas_base_attach()
8640 ioc->pfacts = NULL; in mpt3sas_base_attach()
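A small sketch of the handle-bitmap sizing used in mpt3sas_base_attach() above: pd_handles, blocking_handles, pend_os_device_add and device_remove_in_progress each hold one bit per possible device handle, so their size in bytes is MaxDevHandle / 8 rounded up.

#include <stdio.h>

static unsigned int handle_bitmap_bytes(unsigned int max_dev_handle)
{
	return (max_dev_handle + 7) / 8;	/* same result as /8 plus the %8 check */
}

int main(void)
{
	printf("%u %u\n", handle_bitmap_bytes(1024), handle_bitmap_bytes(1025));	/* 128 129 */
	return 0;
}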
8646 * mpt3sas_base_detach - remove controller instance
8658 pci_set_drvdata(ioc->pdev, NULL); in mpt3sas_base_detach()
8659 kfree(ioc->cpu_msix_table); in mpt3sas_base_detach()
8660 if (ioc->is_warpdrive) in mpt3sas_base_detach()
8661 kfree(ioc->reply_post_host_index); in mpt3sas_base_detach()
8662 kfree(ioc->pd_handles); in mpt3sas_base_detach()
8663 kfree(ioc->blocking_handles); in mpt3sas_base_detach()
8664 kfree(ioc->device_remove_in_progress); in mpt3sas_base_detach()
8665 kfree(ioc->pend_os_device_add); in mpt3sas_base_detach()
8666 kfree(ioc->pfacts); in mpt3sas_base_detach()
8667 kfree(ioc->ctl_cmds.reply); in mpt3sas_base_detach()
8668 kfree(ioc->ctl_cmds.sense); in mpt3sas_base_detach()
8669 kfree(ioc->base_cmds.reply); in mpt3sas_base_detach()
8670 kfree(ioc->port_enable_cmds.reply); in mpt3sas_base_detach()
8671 kfree(ioc->tm_cmds.reply); in mpt3sas_base_detach()
8672 kfree(ioc->transport_cmds.reply); in mpt3sas_base_detach()
8673 kfree(ioc->scsih_cmds.reply); in mpt3sas_base_detach()
8674 kfree(ioc->config_cmds.reply); in mpt3sas_base_detach()
8678 * _base_pre_reset_handler - pre reset handler
8689 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8697 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { in _base_clear_outstanding_mpt_commands()
8698 ioc->transport_cmds.status |= MPT3_CMD_RESET; in _base_clear_outstanding_mpt_commands()
8699 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); in _base_clear_outstanding_mpt_commands()
8700 complete(&ioc->transport_cmds.done); in _base_clear_outstanding_mpt_commands()
8702 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { in _base_clear_outstanding_mpt_commands()
8703 ioc->base_cmds.status |= MPT3_CMD_RESET; in _base_clear_outstanding_mpt_commands()
8704 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); in _base_clear_outstanding_mpt_commands()
8705 complete(&ioc->base_cmds.done); in _base_clear_outstanding_mpt_commands()
8707 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { in _base_clear_outstanding_mpt_commands()
8708 ioc->port_enable_failed = 1; in _base_clear_outstanding_mpt_commands()
8709 ioc->port_enable_cmds.status |= MPT3_CMD_RESET; in _base_clear_outstanding_mpt_commands()
8710 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); in _base_clear_outstanding_mpt_commands()
8711 if (ioc->is_driver_loading) { in _base_clear_outstanding_mpt_commands()
8712 ioc->start_scan_failed = in _base_clear_outstanding_mpt_commands()
8714 ioc->start_scan = 0; in _base_clear_outstanding_mpt_commands()
8716 complete(&ioc->port_enable_cmds.done); in _base_clear_outstanding_mpt_commands()
8719 if (ioc->config_cmds.status & MPT3_CMD_PENDING) { in _base_clear_outstanding_mpt_commands()
8720 ioc->config_cmds.status |= MPT3_CMD_RESET; in _base_clear_outstanding_mpt_commands()
8721 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); in _base_clear_outstanding_mpt_commands()
8722 ioc->config_cmds.smid = USHRT_MAX; in _base_clear_outstanding_mpt_commands()
8723 complete(&ioc->config_cmds.done); in _base_clear_outstanding_mpt_commands()
8728 * _base_clear_outstanding_commands - clear all outstanding commands
8739 * _base_reset_done_handler - reset done handler
8750 * mpt3sas_wait_for_commands_to_complete - wait for outstanding commands to complete before a reset
8761 ioc->pending_io_count = 0; in mpt3sas_wait_for_commands_to_complete()
8768 ioc->pending_io_count = scsi_host_busy(ioc->shost); in mpt3sas_wait_for_commands_to_complete()
8770 if (!ioc->pending_io_count) in mpt3sas_wait_for_commands_to_complete()
8774 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); in mpt3sas_wait_for_commands_to_complete()
8778 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
8790 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; in _base_check_ioc_facts_changes()
8792 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { in _base_check_ioc_facts_changes()
8793 pd_handles_sz = (ioc->facts.MaxDevHandle / 8); in _base_check_ioc_facts_changes()
8794 if (ioc->facts.MaxDevHandle % 8) in _base_check_ioc_facts_changes()
8797 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, in _base_check_ioc_facts_changes()
8803 return -ENOMEM; in _base_check_ioc_facts_changes()
8805 memset(pd_handles + ioc->pd_handles_sz, 0, in _base_check_ioc_facts_changes()
8806 (pd_handles_sz - ioc->pd_handles_sz)); in _base_check_ioc_facts_changes()
8807 ioc->pd_handles = pd_handles; in _base_check_ioc_facts_changes()
8809 blocking_handles = krealloc(ioc->blocking_handles, in _base_check_ioc_facts_changes()
8816 return -ENOMEM; in _base_check_ioc_facts_changes()
8818 memset(blocking_handles + ioc->pd_handles_sz, 0, in _base_check_ioc_facts_changes()
8819 (pd_handles_sz - ioc->pd_handles_sz)); in _base_check_ioc_facts_changes()
8820 ioc->blocking_handles = blocking_handles; in _base_check_ioc_facts_changes()
8821 ioc->pd_handles_sz = pd_handles_sz; in _base_check_ioc_facts_changes()
8823 pend_os_device_add = krealloc(ioc->pend_os_device_add, in _base_check_ioc_facts_changes()
8829 return -ENOMEM; in _base_check_ioc_facts_changes()
8831 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, in _base_check_ioc_facts_changes()
8832 (pd_handles_sz - ioc->pend_os_device_add_sz)); in _base_check_ioc_facts_changes()
8833 ioc->pend_os_device_add = pend_os_device_add; in _base_check_ioc_facts_changes()
8834 ioc->pend_os_device_add_sz = pd_handles_sz; in _base_check_ioc_facts_changes()
8837 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL); in _base_check_ioc_facts_changes()
8843 return -ENOMEM; in _base_check_ioc_facts_changes()
8846 ioc->device_remove_in_progress_sz, 0, in _base_check_ioc_facts_changes()
8847 (pd_handles_sz - ioc->device_remove_in_progress_sz)); in _base_check_ioc_facts_changes()
8848 ioc->device_remove_in_progress = device_remove_in_progress; in _base_check_ioc_facts_changes()
8849 ioc->device_remove_in_progress_sz = pd_handles_sz; in _base_check_ioc_facts_changes()
8852 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); in _base_check_ioc_facts_changes()
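A user-space sketch of the grow-and-zero-the-tail pattern that _base_check_ioc_facts_changes() applies above when a post-reset IOCFacts reports a larger MaxDevHandle: the bitmap is reallocated to the new size and only the newly added bytes are cleared. Plain realloc()/memset() stand in for the kernel's krealloc().

#include <stdlib.h>
#include <string.h>

static void *grow_bitmap(void *old, size_t old_sz, size_t new_sz)
{
	unsigned char *p;

	if (new_sz <= old_sz)
		return old;			/* nothing to grow */
	p = realloc(old, new_sz);
	if (!p)
		return NULL;			/* old buffer is still valid */
	memset(p + old_sz, 0, new_sz - old_sz);	/* zero only the new tail */
	return p;
}

int main(void)
{
	unsigned char *map = calloc(16, 1);
	unsigned char *bigger = grow_bitmap(map, 16, 24);

	free(bigger ? bigger : map);
	return bigger ? 0 : 1;
}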
8857 * mpt3sas_base_hard_reset_handler - reset controller
8861 * Return: 0 for success, non-zero for failure.
8874 if (ioc->pci_error_recovery) { in mpt3sas_base_hard_reset_handler()
8884 mutex_lock(&ioc->reset_in_progress_mutex); in mpt3sas_base_hard_reset_handler()
8886 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_hard_reset_handler()
8887 ioc->shost_recovery = 1; in mpt3sas_base_hard_reset_handler()
8888 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_hard_reset_handler()
8890 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & in mpt3sas_base_hard_reset_handler()
8892 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & in mpt3sas_base_hard_reset_handler()
8900 ioc->htb_rel.trigger_info_dwords[1] = in mpt3sas_base_hard_reset_handler()
8916 if (ioc->is_driver_loading && ioc->port_enable_failed) { in mpt3sas_base_hard_reset_handler()
8917 ioc->remove_host = 1; in mpt3sas_base_hard_reset_handler()
8918 r = -EFAULT; in mpt3sas_base_hard_reset_handler()
8932 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) in mpt3sas_base_hard_reset_handler()
8935 " firmware version is running\n", ioc->name); in mpt3sas_base_hard_reset_handler()
8944 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_hard_reset_handler()
8945 ioc->shost_recovery = 0; in mpt3sas_base_hard_reset_handler()
8946 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); in mpt3sas_base_hard_reset_handler()
8947 ioc->ioc_reset_count++; in mpt3sas_base_hard_reset_handler()
8948 mutex_unlock(&ioc->reset_in_progress_mutex); in mpt3sas_base_hard_reset_handler()