Lines Matching refs:ctrl_info

69 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
72 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
74 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
77 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
80 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
84 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
88 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
92 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
93 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
94 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
95 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
96 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
97 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
98 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
100 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
247 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_offline() argument
249 return !ctrl_info->controller_online; in pqi_ctrl_offline()
252 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) in pqi_check_ctrl_health() argument
254 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
255 if (!sis_is_firmware_running(ctrl_info)) in pqi_check_ctrl_health()
256 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); in pqi_check_ctrl_health()
267 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_mode() argument
269 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE; in pqi_get_ctrl_mode()
272 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, in pqi_save_ctrl_mode() argument
277 driver_scratch = sis_read_driver_scratch(ctrl_info); in pqi_save_ctrl_mode()
284 sis_write_driver_scratch(ctrl_info, driver_scratch); in pqi_save_ctrl_mode()
287 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) in pqi_is_fw_triage_supported() argument
289 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; in pqi_is_fw_triage_supported()
292 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) in pqi_save_fw_triage_setting() argument
296 driver_scratch = sis_read_driver_scratch(ctrl_info); in pqi_save_fw_triage_setting()
303 sis_write_driver_scratch(ctrl_info, driver_scratch); in pqi_save_fw_triage_setting()
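The group around lines 267-303 persists the controller mode (SIS vs. PQI) and the firmware-triage capability as individual bits in the SIS driver-scratch register, always via read-modify-write so that saving one flag never clobbers the other. A minimal, self-contained C sketch of that idiom follows; the masks and the in-memory "register" are illustrative stand-ins, not the driver's real PQI_DRIVER_SCRATCH_* constants or sis_read_driver_scratch()/sis_write_driver_scratch() helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit masks standing in for PQI_DRIVER_SCRATCH_*. */
#define SCRATCH_PQI_MODE            0x1u
#define SCRATCH_FW_TRIAGE_SUPPORTED 0x2u

static uint32_t scratch;  /* stands in for the hardware scratch register */

static uint32_t read_scratch(void)    { return scratch; }
static void write_scratch(uint32_t v) { scratch = v; }

/* Read-modify-write one flag without disturbing the other bits. */
static void save_fw_triage_setting(bool supported)
{
    uint32_t v = read_scratch();

    if (supported)
        v |= SCRATCH_FW_TRIAGE_SUPPORTED;
    else
        v &= ~SCRATCH_FW_TRIAGE_SUPPORTED;

    write_scratch(v);
}

int main(void)
{
    save_fw_triage_setting(true);
    printf("mode: %s, fw triage: %s\n",
           (read_scratch() & SCRATCH_PQI_MODE) ? "PQI" : "SIS",
           (read_scratch() & SCRATCH_FW_TRIAGE_SUPPORTED) ? "yes" : "no");
    return 0;
}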
306 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_scan() argument
308 ctrl_info->scan_blocked = true; in pqi_ctrl_block_scan()
309 mutex_lock(&ctrl_info->scan_mutex); in pqi_ctrl_block_scan()
312 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_scan() argument
314 ctrl_info->scan_blocked = false; in pqi_ctrl_unblock_scan()
315 mutex_unlock(&ctrl_info->scan_mutex); in pqi_ctrl_unblock_scan()
318 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_scan_blocked() argument
320 return ctrl_info->scan_blocked; in pqi_ctrl_scan_blocked()
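Lines 306-320 pair a scan_blocked boolean with scan_mutex: blocking a scan sets the flag and then holds the mutex, unblocking clears the flag and drops it, and the flag alone answers the cheap "is scanning blocked?" question. A hedged userspace sketch of that flag-plus-mutex shape, using pthreads in place of the kernel mutex API (struct and function names are made up):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ctrl {
    bool scan_blocked;           /* advisory flag read by would-be scanners   */
    pthread_mutex_t scan_mutex;  /* held for as long as scanning is blocked   */
};

static void ctrl_block_scan(struct ctrl *c)
{
    c->scan_blocked = true;
    pthread_mutex_lock(&c->scan_mutex);
}

static void ctrl_unblock_scan(struct ctrl *c)
{
    c->scan_blocked = false;
    pthread_mutex_unlock(&c->scan_mutex);
}

static bool ctrl_scan_blocked(struct ctrl *c)
{
    return c->scan_blocked;
}

int main(void)
{
    static struct ctrl c = { .scan_mutex = PTHREAD_MUTEX_INITIALIZER };

    ctrl_block_scan(&c);
    printf("scan blocked: %d\n", ctrl_scan_blocked(&c));
    ctrl_unblock_scan(&c);
    printf("scan blocked: %d\n", ctrl_scan_blocked(&c));
    return 0;
}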
323 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_device_reset() argument
325 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_block_device_reset()
328 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_device_reset() argument
330 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_unblock_device_reset()
333 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_block_requests() argument
339 shost = ctrl_info->scsi_host; in pqi_scsi_block_requests()
353 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_unblock_requests() argument
355 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_scsi_unblock_requests()
358 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_busy() argument
360 atomic_inc(&ctrl_info->num_busy_threads); in pqi_ctrl_busy()
363 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unbusy() argument
365 atomic_dec(&ctrl_info->num_busy_threads); in pqi_ctrl_unbusy()
368 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_blocked() argument
370 return ctrl_info->block_requests; in pqi_ctrl_blocked()
373 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_requests() argument
375 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
378 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_requests() argument
380 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
381 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
384 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_wait_if_ctrl_blocked() argument
386 if (!pqi_ctrl_blocked(ctrl_info)) in pqi_wait_if_ctrl_blocked()
389 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
390 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
391 !pqi_ctrl_blocked(ctrl_info)); in pqi_wait_if_ctrl_blocked()
392 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
397 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_wait_until_quiesced() argument
407 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
408 atomic_read(&ctrl_info->num_blocked_threads)) { in pqi_ctrl_wait_until_quiesced()
410 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
420 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
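Lines 358-420 are the request-gating machinery: submitters bump num_busy_threads around each request, a block_requests flag plus a wait queue parks them, and the quiesce path spins until every remaining busy thread is parked (the busy count no longer exceeds the blocked count), warning periodically in the real driver. A compact userspace approximation with C11 atomics and a condition variable; the kernel code uses wait_event()/wake_up_all(), and all names here are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct ctrl {
    atomic_int num_busy_threads;     /* threads currently issuing requests    */
    atomic_int num_blocked_threads;  /* busy threads parked by block_requests */
    bool block_requests;
    pthread_mutex_t lock;
    pthread_cond_t unblocked;        /* plays the role of block_requests_wait */
};

static void ctrl_busy(struct ctrl *c)   { atomic_fetch_add(&c->num_busy_threads, 1); }
static void ctrl_unbusy(struct ctrl *c) { atomic_fetch_sub(&c->num_busy_threads, 1); }

static void ctrl_block_requests(struct ctrl *c)
{
    pthread_mutex_lock(&c->lock);
    c->block_requests = true;
    pthread_mutex_unlock(&c->lock);
}

static void ctrl_unblock_requests(struct ctrl *c)
{
    pthread_mutex_lock(&c->lock);
    c->block_requests = false;
    pthread_cond_broadcast(&c->unblocked);   /* wake_up_all() equivalent */
    pthread_mutex_unlock(&c->lock);
}

/* Park the caller while requests are blocked, counting it as "blocked". */
static void wait_if_ctrl_blocked(struct ctrl *c)
{
    pthread_mutex_lock(&c->lock);
    if (c->block_requests) {
        atomic_fetch_add(&c->num_blocked_threads, 1);
        while (c->block_requests)
            pthread_cond_wait(&c->unblocked, &c->lock);
        atomic_fetch_sub(&c->num_blocked_threads, 1);
    }
    pthread_mutex_unlock(&c->lock);
}

/* Quiesce: wait until every busy thread has either finished or parked. */
static void ctrl_wait_until_quiesced(struct ctrl *c)
{
    while (atomic_load(&c->num_busy_threads) >
           atomic_load(&c->num_blocked_threads))
        usleep(1000);
}

static void *submitter(void *arg)
{
    struct ctrl *c = arg;

    ctrl_busy(c);
    wait_if_ctrl_blocked(c);   /* parks here until main() unblocks */
    ctrl_unbusy(c);
    return NULL;
}

int main(void)
{
    static struct ctrl c = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .unblocked = PTHREAD_COND_INITIALIZER,
    };
    pthread_t t;

    ctrl_block_requests(&c);
    pthread_create(&t, NULL, submitter, &c);
    ctrl_wait_until_quiesced(&c);   /* demo: returns once no unparked busy thread remains */
    printf("quiesced\n");
    ctrl_unblock_requests(&c);
    pthread_join(t, NULL);
    return 0;
}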
430 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_start() argument
432 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_start()
435 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_done() argument
437 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_done()
440 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_ofa_finished() argument
442 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
443 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
446 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_in_progress() argument
448 return mutex_is_locked(&ctrl_info->ofa_mutex); in pqi_ofa_in_progress()
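Lines 430-448 use ofa_mutex in three roles for Online Firmware Activation: the OFA path holds it for the whole operation, other paths wait for OFA to finish by simply taking and dropping it, and mutex_is_locked() doubles as an "OFA in progress" test. A short pthread sketch of the lock-then-unlock barrier; since pthreads has no mutex_is_locked(), a trylock stands in for it here.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ofa_mutex = PTHREAD_MUTEX_INITIALIZER;

static void ofa_start(void) { pthread_mutex_lock(&ofa_mutex); }
static void ofa_done(void)  { pthread_mutex_unlock(&ofa_mutex); }

/* Barrier idiom: taking and immediately dropping the mutex cannot succeed
 * until whoever started OFA has called ofa_done(). */
static void wait_until_ofa_finished(void)
{
    pthread_mutex_lock(&ofa_mutex);
    pthread_mutex_unlock(&ofa_mutex);
}

/* Approximation of mutex_is_locked(): a failed trylock means someone holds it. */
static bool ofa_in_progress(void)
{
    if (pthread_mutex_trylock(&ofa_mutex) != 0)
        return true;
    pthread_mutex_unlock(&ofa_mutex);
    return false;
}

int main(void)
{
    ofa_start();
    /* ofa_in_progress() would report true here when asked from another thread. */
    ofa_done();
    wait_until_ofa_finished();
    return ofa_in_progress() ? 1 : 0;   /* 0: not in progress */
}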
492 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, in pqi_schedule_rescan_worker_with_delay() argument
495 if (pqi_ctrl_offline(ctrl_info)) in pqi_schedule_rescan_worker_with_delay()
498 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
501 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker() argument
503 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); in pqi_schedule_rescan_worker()
508 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker_delayed() argument
510 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); in pqi_schedule_rescan_worker_delayed()
513 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_rescan_worker() argument
515 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
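Lines 492-515 route every rescan through one delayed_work item: an immediate rescan is just a zero delay, a deferred one uses PQI_RESCAN_WORK_DELAY, and teardown cancels the work synchronously; the worker recovers its ctrl_info with container_of(), exactly as lines 1129 and 1882 show. A kernel-module-style sketch of that shape (hedged: the struct, constant, and function names are illustrative, and this only builds inside a kernel tree, not as a userspace program):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define RESCAN_WORK_DELAY (10 * HZ)   /* illustrative, not the driver's value */

struct my_ctrl {
    struct delayed_work rescan_work;
    /* ... */
};

static void rescan_worker(struct work_struct *work)
{
    struct my_ctrl *ctrl =
        container_of(to_delayed_work(work), struct my_ctrl, rescan_work);

    /* scan devices on ctrl ... */
    (void)ctrl;
}

static void schedule_rescan(struct my_ctrl *ctrl, unsigned long delay)
{
    /* offline checks would go here, as in pqi_schedule_rescan_worker_with_delay() */
    schedule_delayed_work(&ctrl->rescan_work, delay);   /* 0 = immediate */
}

static void ctrl_init_rescan(struct my_ctrl *ctrl)
{
    INIT_DELAYED_WORK(&ctrl->rescan_work, rescan_worker);
}

static void ctrl_teardown_rescan(struct my_ctrl *ctrl)
{
    cancel_delayed_work_sync(&ctrl->rescan_work);
}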
518 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) in pqi_read_heartbeat_counter() argument
520 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
523 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
526 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_read_soft_reset_status() argument
528 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
531 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_clear_soft_reset_status() argument
535 status = pqi_read_soft_reset_status(ctrl_info); in pqi_clear_soft_reset_status()
537 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
607 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_path_request() argument
643 if (ctrl_info->rpl_extended_format_4_5_supported) in pqi_build_raid_path_request()
648 cdb[1] = ctrl_info->ciss_report_log_flags; in pqi_build_raid_path_request()
694 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); in pqi_build_raid_path_request()
713 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
725 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct s… in pqi_alloc_io_request() argument
734 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
747 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; in pqi_alloc_io_request()
766 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_send_scsi_raid_request() argument
774 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, in pqi_send_scsi_raid_request()
779 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); in pqi_send_scsi_raid_request()
781 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
788 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_request() argument
791 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_request()
795 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_with_error() argument
799 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_with_error()
803 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, in pqi_identify_controller() argument
806 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, in pqi_identify_controller()
810 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, in pqi_sense_subsystem_info() argument
813 return pqi_send_ctrl_raid_request(ctrl_info, in pqi_sense_subsystem_info()
818 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_inquiry() argument
821 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, in pqi_scsi_inquiry()
825 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, in pqi_identify_physical_device() argument
834 rc = pqi_build_raid_path_request(ctrl_info, &request, in pqi_identify_physical_device()
844 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_identify_physical_device()
846 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
882 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) in pqi_get_advanced_raid_bypass_config() argument
893 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, in pqi_get_advanced_raid_bypass_config()
901 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_get_advanced_raid_bypass_config()
903 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_get_advanced_raid_bypass_config()
922 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_get_advanced_raid_bypass_config()
926 ctrl_info->max_transfer_encrypted_nvme = in pqi_get_advanced_raid_bypass_config()
930 ctrl_info->max_write_raid_5_6 = in pqi_get_advanced_raid_bypass_config()
934 ctrl_info->max_write_raid_1_10_2drive = in pqi_get_advanced_raid_bypass_config()
938 ctrl_info->max_write_raid_1_10_3drive = in pqi_get_advanced_raid_bypass_config()
948 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, in pqi_flush_cache() argument
960 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, in pqi_flush_cache()
968 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, in pqi_csmi_smp_passthru() argument
972 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, in pqi_csmi_smp_passthru()
978 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) in pqi_set_diag_rescan() argument
987 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, in pqi_set_diag_rescan()
994 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, in pqi_set_diag_rescan()
1003 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, in pqi_write_host_wellness() argument
1006 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, in pqi_write_host_wellness()
1024 struct pqi_ctrl_info *ctrl_info) in pqi_write_driver_version_to_host_wellness() argument
1052 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_driver_version_to_host_wellness()
1073 struct pqi_ctrl_info *ctrl_info) in pqi_write_current_time_to_host_wellness() argument
1115 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_current_time_to_host_wellness()
1127 struct pqi_ctrl_info *ctrl_info; in pqi_update_time_worker() local
1129 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_update_time_worker()
1132 rc = pqi_write_current_time_to_host_wellness(ctrl_info); in pqi_update_time_worker()
1134 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
1137 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
1141 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_update_time_worker() argument
1143 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
1146 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_update_time_worker() argument
1148 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
1151 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, in pqi_report_luns() argument
1154 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); in pqi_report_luns()
1157 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) in pqi_report_phys_logical_luns() argument
1172 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); in pqi_report_phys_logical_luns()
1192 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); in pqi_report_phys_logical_luns()
1218 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_phys_luns() argument
1229 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); in pqi_report_phys_luns()
1233 if (ctrl_info->rpl_extended_format_4_5_supported) { in pqi_report_phys_luns()
1240 dev_err(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1245 dev_warn(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1279 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_logical_luns() argument
1281 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); in pqi_report_logical_luns()
1284 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_lists() argument
1295 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); in pqi_get_device_lists()
1297 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1300 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); in pqi_get_device_lists()
1302 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
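Lines 1151-1192 show the usual two-pass CISS report-LUNs dance: issue the command once with only a header-sized buffer to learn how much data the controller has, size a full buffer from the returned list length, then issue the command again. A self-contained C sketch of that sizing logic with a fake firmware callback; everything here is illustrative, and the real code goes through pqi_send_ctrl_raid_request().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mimics a CISS report-LUNs header: big-endian byte count of the list. */
struct report_lun_header {
    uint8_t list_length[4];   /* bytes of LUN entries that follow */
    uint8_t reserved[4];
};

#define LUN_ENTRY_SIZE 8
#define FAKE_NUM_LUNS  5

/* Stand-in for the controller: fills as much of the response as fits. */
static void fake_report_luns(void *buffer, size_t buffer_length)
{
    struct report_lun_header hdr = {0};
    uint32_t list_bytes = FAKE_NUM_LUNS * LUN_ENTRY_SIZE;

    hdr.list_length[0] = list_bytes >> 24;
    hdr.list_length[1] = list_bytes >> 16;
    hdr.list_length[2] = list_bytes >> 8;
    hdr.list_length[3] = list_bytes;

    memset(buffer, 0, buffer_length);
    memcpy(buffer, &hdr, buffer_length < sizeof(hdr) ? buffer_length : sizeof(hdr));
    /* LUN entries would follow here when the buffer is large enough. */
}

int main(void)
{
    struct report_lun_header hdr;
    uint32_t list_bytes;
    size_t full_length;
    void *lun_data;

    /* Pass 1: header only, to learn the list length. */
    fake_report_luns(&hdr, sizeof(hdr));
    list_bytes = ((uint32_t)hdr.list_length[0] << 24) |
                 ((uint32_t)hdr.list_length[1] << 16) |
                 ((uint32_t)hdr.list_length[2] << 8)  |
                  (uint32_t)hdr.list_length[3];

    /* Pass 2: header plus every entry the controller reported. */
    full_length = sizeof(hdr) + list_bytes;
    lun_data = calloc(1, full_length);
    if (!lun_data)
        return 1;
    fake_report_luns(lun_data, full_length);

    printf("controller reports %u LUN bytes (%u entries)\n",
           list_bytes, list_bytes / LUN_ENTRY_SIZE);
    free(lun_data);
    return 0;
}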
1394 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_level() argument
1405 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1418 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_validate_raid_map() argument
1458 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1466 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_map() argument
1477 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1492 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1499 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1508 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); in pqi_get_raid_map()
1522 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, in pqi_set_max_transfer_encrypted() argument
1525 if (!ctrl_info->lv_drive_type_mix_valid) { in pqi_set_max_transfer_encrypted()
1539 ctrl_info->max_transfer_encrypted_sas_sata; in pqi_set_max_transfer_encrypted()
1543 ctrl_info->max_transfer_encrypted_nvme; in pqi_set_max_transfer_encrypted()
1549 min(ctrl_info->max_transfer_encrypted_sas_sata, in pqi_set_max_transfer_encrypted()
1550 ctrl_info->max_transfer_encrypted_nvme); in pqi_set_max_transfer_encrypted()
1555 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_bypass_status() argument
1566 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1580 pqi_get_raid_map(ctrl_info, device) == 0) { in pqi_get_raid_bypass_status()
1584 pqi_set_max_transfer_encrypted(ctrl_info, device); in pqi_get_raid_bypass_status()
1595 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_volume_status() argument
1609 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1637 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_physical_device_info() argument
1645 rc = pqi_identify_physical_device(ctrl_info, device, in pqi_get_physical_device_info()
1689 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_logical_device_info() argument
1700 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1717 pqi_get_raid_level(ctrl_info, device); in pqi_get_logical_device_info()
1718 pqi_get_raid_bypass_status(ctrl_info, device); in pqi_get_logical_device_info()
1719 pqi_get_volume_status(ctrl_info, device); in pqi_get_logical_device_info()
1744 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_info_phys_logical() argument
1754 rc = pqi_get_logical_device_info(ctrl_info, device); in pqi_get_device_info_phys_logical()
1756 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); in pqi_get_device_info_phys_logical()
1761 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_info() argument
1767 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); in pqi_get_device_info()
1775 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_show_volume_status() argument
1872 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1874 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1880 struct pqi_ctrl_info *ctrl_info; in pqi_rescan_worker() local
1882 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_rescan_worker()
1885 pqi_scan_scsi_devices(ctrl_info); in pqi_rescan_worker()
1888 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, in pqi_add_device() argument
1894 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1897 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1904 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_remove_device() argument
1910 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, in pqi_remove_device()
1913 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1915 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1930 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, in pqi_find_scsi_dev() argument
1935 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1959 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_find_entry() argument
1964 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1989 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, in pqi_dev_info() argument
1996 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
2047 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
2069 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_update_device() argument
2135 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, in pqi_fixup_botched_add() argument
2140 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2142 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2179 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, in pqi_update_device_list() argument
2199 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2202 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
2208 find_result = pqi_scsi_find_entry(ctrl_info, device, in pqi_update_device_list()
2219 pqi_scsi_update_device(ctrl_info, matching_device, device); in pqi_update_device_list()
2239 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
2255 &ctrl_info->scsi_device_list); in pqi_update_device_list()
2262 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2269 if (pqi_ofa_in_progress(ctrl_info)) { in pqi_update_device_list()
2273 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_update_device_list()
2274 pqi_scsi_unblock_requests(ctrl_info); in pqi_update_device_list()
2280 pqi_dev_info(ctrl_info, "offline", device); in pqi_update_device_list()
2281 pqi_show_volume_status(ctrl_info, device); in pqi_update_device_list()
2283 pqi_dev_info(ctrl_info, "removed", device); in pqi_update_device_list()
2286 pqi_remove_device(ctrl_info, device); in pqi_update_device_list()
2295 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_update_device_list()
2299 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2302 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2305 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2313 rc = pqi_add_device(ctrl_info, device); in pqi_update_device_list()
2315 pqi_dev_info(ctrl_info, "added", device); in pqi_update_device_list()
2317 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
2319 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
2322 pqi_fixup_botched_add(ctrl_info, device); in pqi_update_device_list()
2371 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_update_scsi_devices() argument
2395 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); in pqi_update_scsi_devices()
2422 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2441 ctrl_info->lv_drive_type_mix_valid = true; in pqi_update_scsi_devices()
2449 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2457 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2510 rc = pqi_get_device_info(ctrl_info, device, id_phys); in pqi_update_scsi_devices()
2512 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2518 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2523 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2556 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); in pqi_update_scsi_devices()
2575 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_scan_scsi_devices() argument
2580 if (pqi_ctrl_offline(ctrl_info)) in pqi_scan_scsi_devices()
2583 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2586 if (pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2588 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2592 rc = pqi_update_scsi_devices(ctrl_info); in pqi_scan_scsi_devices()
2593 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2594 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2596 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2603 struct pqi_ctrl_info *ctrl_info; in pqi_scan_start() local
2605 ctrl_info = shost_to_hba(shost); in pqi_scan_start()
2607 pqi_scan_scsi_devices(ctrl_info); in pqi_scan_start()
2615 struct pqi_ctrl_info *ctrl_info; in pqi_scan_finished() local
2617 ctrl_info = shost_priv(shost); in pqi_scan_finished()
2619 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
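Lines 2575-2619 tie the scan_mutex pieces together: a scan proceeds only if mutex_trylock() wins, a blocked or failed scan gets rescheduled as delayed work, and scan_finished simply reports whether the mutex is still held. A hedged pthread sketch of the trylock-or-defer control flow, with the deferral reduced to a flag (the real driver re-queues the rescan work item):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool rescan_deferred;   /* stands in for scheduling delayed rescan work */

static int update_devices(void) { return 0; }   /* pretend the scan succeeds */

static int scan_devices(bool scan_blocked)
{
    int rc;

    if (pthread_mutex_trylock(&scan_mutex) != 0)
        return -1;   /* someone else is scanning: treat as "in progress" */

    if (scan_blocked) {
        rc = -1;
        rescan_deferred = true;   /* try again later */
    } else {
        rc = update_devices();
        if (rc)
            rescan_deferred = true;
    }

    pthread_mutex_unlock(&scan_mutex);
    return rc;
}

int main(void)
{
    int rc = scan_devices(false);
    printf("scan rc=%d, deferred=%d\n", rc, rescan_deferred);
    rc = scan_devices(true);
    printf("scan rc=%d, deferred=%d\n", rc, rescan_deferred);
    return 0;
}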
2646 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, in pqi_aio_raid_level_supported() argument
2655 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2656 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2660 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2661 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2665 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2666 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2670 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2671 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2730 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, in pci_get_aio_common_raid_map_values() argument
2964 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_bypass_submit_scsi_cmd() argument
2982 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) in pqi_raid_bypass_submit_scsi_cmd()
2990 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); in pqi_raid_bypass_submit_scsi_cmd()
3047 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
3051 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
3056 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, in pqi_raid_bypass_submit_scsi_cmd()
3075 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_mode_ready() argument
3082 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
3091 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3103 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3115 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3136 struct pqi_ctrl_info *ctrl_info; in pqi_take_device_offline() local
3144 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
3145 pqi_schedule_rescan_worker(ctrl_info); in pqi_take_device_offline()
3146 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
3147 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
3222 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error() local
3229 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3237 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3362 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, in pqi_interpret_task_management_response() argument
3384 dev_err(&ctrl_info->pci_dev->dev, in pqi_interpret_task_management_response()
3390 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, in pqi_invalid_response() argument
3393 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); in pqi_invalid_response()
3396 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) in pqi_process_io_intr() argument
3410 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
3411 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); in pqi_process_io_intr()
3412 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3414 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
3425 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
3426 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); in pqi_process_io_intr()
3427 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3429 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
3433 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
3435 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); in pqi_process_io_intr()
3436 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3456 io_request->status = pqi_interpret_task_management_response(ctrl_info, in pqi_process_io_intr()
3465 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
3471 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); in pqi_process_io_intr()
3472 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3484 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
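Lines 3396-3484 are the outbound-queue consumer loop: read the producer index, sanity-check it and each response's request id against the queue size and the IO-slot pool (the real driver takes the controller offline on nonsense), and advance the consumer index modulo num_elements_per_oq. A standalone C sketch of that bounded ring consumption; the sizes and the response layout are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define NUM_ELEMENTS 8     /* illustrative num_elements_per_oq   */
#define MAX_IO_SLOTS 16    /* illustrative io_request_pool size  */

struct response {
    uint16_t request_id;
};

static struct response queue[NUM_ELEMENTS];

/* Consume everything between the consumer index ci and the producer index pi.
 * Returns the number of responses handled, or -1 on a bogus index. */
static int process_io_intr(unsigned int *ci, unsigned int pi)
{
    int handled = 0;

    if (pi >= NUM_ELEMENTS)
        return -1;                       /* producer index out of range */

    while (*ci != pi) {
        struct response *rsp = &queue[*ci];

        if (rsp->request_id >= MAX_IO_SLOTS)
            return -1;                   /* request id out of range */

        /* ...complete io_request_pool[rsp->request_id] here... */

        *ci = (*ci + 1) % NUM_ELEMENTS;  /* wrap the consumer index */
        handled++;
    }
    return handled;
}

int main(void)
{
    unsigned int ci = 6;
    int handled;

    queue[6].request_id = 3;
    queue[7].request_id = 4;
    queue[0].request_id = 5;

    handled = process_io_intr(&ci, 1);   /* consumes slots 6, 7, 0 */
    printf("handled %d responses, ci now %u\n", handled, ci);
    return 0;
}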
3508 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, in pqi_send_event_ack() argument
3517 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3527 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3533 if (pqi_ctrl_offline(ctrl_info)) in pqi_send_event_ack()
3542 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3554 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, in pqi_acknowledge_event() argument
3568 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); in pqi_acknowledge_event()
3575 struct pqi_ctrl_info *ctrl_info) in pqi_poll_for_soft_reset_status() argument
3583 status = pqi_read_soft_reset_status(ctrl_info); in pqi_poll_for_soft_reset_status()
3590 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_soft_reset_status()
3594 dev_warn(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3603 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) in pqi_process_soft_reset() argument
3609 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3610 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3621 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3623 sis_soft_reset(ctrl_info); in pqi_process_soft_reset()
3626 ctrl_info->pqi_mode_enabled = false; in pqi_process_soft_reset()
3627 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_process_soft_reset()
3628 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); in pqi_process_soft_reset()
3629 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3630 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3631 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3636 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3638 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3639 pqi_clear_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3640 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3641 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3642 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3647 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3650 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3651 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3652 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3653 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); in pqi_process_soft_reset()
3660 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_memory_alloc_worker() local
3662 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); in pqi_ofa_memory_alloc_worker()
3664 pqi_ctrl_ofa_start(ctrl_info); in pqi_ofa_memory_alloc_worker()
3665 pqi_ofa_setup_host_buffer(ctrl_info); in pqi_ofa_memory_alloc_worker()
3666 pqi_ofa_host_memory_update(ctrl_info); in pqi_ofa_memory_alloc_worker()
3671 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_quiesce_worker() local
3674 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); in pqi_ofa_quiesce_worker()
3676 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; in pqi_ofa_quiesce_worker()
3678 pqi_ofa_ctrl_quiesce(ctrl_info); in pqi_ofa_quiesce_worker()
3679 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_quiesce_worker()
3680 pqi_process_soft_reset(ctrl_info); in pqi_ofa_quiesce_worker()
3683 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_process_event() argument
3692 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3694 schedule_work(&ctrl_info->ofa_memory_alloc_work); in pqi_ofa_process_event()
3697 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3699 schedule_work(&ctrl_info->ofa_quiesce_work); in pqi_ofa_process_event()
3703 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3705 ctrl_info->ofa_cancel_reason); in pqi_ofa_process_event()
3706 pqi_ofa_free_host_buffer(ctrl_info); in pqi_ofa_process_event()
3707 pqi_ctrl_ofa_done(ctrl_info); in pqi_ofa_process_event()
3710 dev_err(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3719 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) in pqi_mark_volumes_for_rescan() argument
3724 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3726 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_mark_volumes_for_rescan()
3731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3734 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) in pqi_disable_raid_bypass() argument
3739 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3741 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_disable_raid_bypass()
3745 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3752 struct pqi_ctrl_info *ctrl_info; in pqi_event_worker() local
3756 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); in pqi_event_worker()
3758 pqi_ctrl_busy(ctrl_info); in pqi_event_worker()
3759 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_event_worker()
3760 if (pqi_ctrl_offline(ctrl_info)) in pqi_event_worker()
3764 event = ctrl_info->events; in pqi_event_worker()
3769 ack_event = pqi_ofa_process_event(ctrl_info, event); in pqi_event_worker()
3774 pqi_mark_volumes_for_rescan(ctrl_info); in pqi_event_worker()
3776 pqi_disable_raid_bypass(ctrl_info); in pqi_event_worker()
3779 pqi_acknowledge_event(ctrl_info, event); in pqi_event_worker()
3787 pqi_schedule_rescan_worker_with_delay(ctrl_info, in pqi_event_worker()
3791 pqi_ctrl_unbusy(ctrl_info); in pqi_event_worker()
3800 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); in pqi_heartbeat_timer_handler() local
3802 pqi_check_ctrl_health(ctrl_info); in pqi_heartbeat_timer_handler()
3803 if (pqi_ctrl_offline(ctrl_info)) in pqi_heartbeat_timer_handler()
3806 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3807 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); in pqi_heartbeat_timer_handler()
3809 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3810 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3811 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3814 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); in pqi_heartbeat_timer_handler()
3818 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3821 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3822 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
3826 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_start_heartbeat_timer() argument
3828 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3831 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3832 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3833 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3834 pqi_read_heartbeat_counter(ctrl_info); in pqi_start_heartbeat_timer()
3836 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3838 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3841 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_stop_heartbeat_timer() argument
3843 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
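Lines 3796-3843 implement the heartbeat watchdog: each timer tick compares the interrupt count and the firmware heartbeat counter against the values saved on the previous tick, and only when both have stalled is the controller taken offline (PQI_NO_HEARTBEAT). Stripped of the timer plumbing, the decision is a small pure function; a self-contained sketch, with the bookkeeping slightly simplified relative to the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hb_state {
    uint32_t prev_interrupts;
    uint32_t prev_heartbeat;
};

/* One watchdog tick: returns true if the controller looks dead, i.e. neither
 * the interrupt count nor the firmware heartbeat moved since the last tick. */
static bool heartbeat_tick(struct hb_state *s, uint32_t interrupts, uint32_t heartbeat)
{
    bool dead = (interrupts == s->prev_interrupts) &&
                (heartbeat == s->prev_heartbeat);

    s->prev_interrupts = interrupts;
    s->prev_heartbeat = heartbeat;
    return dead;
}

int main(void)
{
    struct hb_state s = { 0 };

    printf("%d\n", heartbeat_tick(&s, 10, 5));   /* counters moved: alive */
    printf("%d\n", heartbeat_tick(&s, 10, 5));   /* nothing moved: dead   */
    return 0;
}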
3846 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_capture_event_payload() argument
3851 ctrl_info->ofa_bytes_requested = in pqi_ofa_capture_event_payload()
3855 ctrl_info->ofa_cancel_reason = in pqi_ofa_capture_event_payload()
3861 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) in pqi_process_event_intr() argument
3871 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3878 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); in pqi_process_event_intr()
3879 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3894 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3901 pqi_ofa_capture_event_payload(ctrl_info, event, response); in pqi_process_event_intr()
3910 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3918 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) in pqi_configure_legacy_intx() argument
3924 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3936 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, in pqi_change_irq_mode() argument
3939 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3945 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3946 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3955 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3956 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3961 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3968 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3971 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3972 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3980 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
3985 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) in pqi_is_valid_irq() argument
3990 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
3995 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
4012 struct pqi_ctrl_info *ctrl_info; in pqi_irq_handler() local
4018 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
4020 if (!pqi_is_valid_irq(ctrl_info)) in pqi_irq_handler()
4023 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); in pqi_irq_handler()
4027 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
4028 num_events_handled = pqi_process_event_intr(ctrl_info); in pqi_irq_handler()
4036 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
4038 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); in pqi_irq_handler()
4039 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); in pqi_irq_handler()
4045 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_request_irqs() argument
4047 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
4051 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
4053 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
4055 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
4062 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
4068 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_free_irqs() argument
4072 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
4073 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
4074 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
4076 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
4079 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_enable_msix_interrupts() argument
4087 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
4088 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
4091 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
4097 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
4098 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
4102 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_disable_msix_interrupts() argument
4104 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
4105 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
4106 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
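Lines 4045-4106 allocate between PQI_MIN_MSIX_VECTORS and num_queue_groups MSI-X vectors, register one handler per vector with its queue group as the cookie, remember vector 0 as the event interrupt, and tear everything down symmetrically. A kernel-style sketch of that allocate/request/free shape (hedged: structure and names are illustrative, error handling is reduced to the minimum, and this only builds inside a kernel module):

#include <linux/pci.h>
#include <linux/interrupt.h>

#define MIN_MSIX_VECTORS 1
#define MAX_QUEUE_GROUPS 8        /* illustrative upper bound */

struct my_queue_group { int dummy; };

struct my_ctrl {
    struct pci_dev *pci_dev;
    int num_vectors_enabled;
    int num_vectors_initialized;
    int event_irq;
    struct my_queue_group queue_groups[MAX_QUEUE_GROUPS];
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
    struct my_queue_group *qg = data;

    (void)qg;                     /* drain this queue group's outbound queue */
    return IRQ_HANDLED;
}

static int my_setup_irqs(struct my_ctrl *ctrl, int wanted)
{
    int i, rc, n;

    if (wanted > MAX_QUEUE_GROUPS)
        wanted = MAX_QUEUE_GROUPS;

    /* One MSI-X vector per queue group, but settle for as few as MIN_MSIX_VECTORS. */
    n = pci_alloc_irq_vectors(ctrl->pci_dev, MIN_MSIX_VECTORS, wanted, PCI_IRQ_MSIX);
    if (n < 0)
        return n;
    ctrl->num_vectors_enabled = n;

    /* Vector 0 doubles as the event interrupt, as in pqi_request_irqs(). */
    ctrl->event_irq = pci_irq_vector(ctrl->pci_dev, 0);

    for (i = 0; i < n; i++) {
        rc = request_irq(pci_irq_vector(ctrl->pci_dev, i), my_irq_handler,
                         0, "my_ctrl", &ctrl->queue_groups[i]);
        if (rc)
            break;                /* minimal error handling for the sketch */
        ctrl->num_vectors_initialized++;
    }
    return 0;
}

static void my_free_irqs(struct my_ctrl *ctrl)
{
    int i;

    for (i = 0; i < ctrl->num_vectors_initialized; i++)
        free_irq(pci_irq_vector(ctrl->pci_dev, i), &ctrl->queue_groups[i]);
    ctrl->num_vectors_initialized = 0;

    pci_free_irq_vectors(ctrl->pci_dev);
    ctrl->num_vectors_enabled = 0;
}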
4110 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_operational_queues() argument
4126 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
4129 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
4130 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
4131 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
4132 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
4164 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
4165 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
4166 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
4169 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
4172 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
4174 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
4177 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4178 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4181 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4182 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4188 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4189 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4195 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4196 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4199 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4200 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4206 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
4207 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
4208 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4209 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4216 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4217 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4220 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4222 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4228 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4230 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4236 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4238 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4244 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
4245 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
4246 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4248 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
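Lines 4110-4248 build every operational queue out of one dma_alloc_coherent() block: element arrays and queue-index words are carved out of it with PTR_ALIGN(), and each piece's bus address is simply the base DMA handle plus that piece's offset from the base virtual address. The offset arithmetic is easy to show on its own; a userspace sketch using a plain malloc'ed buffer and a pretend bus address (alignment and sizes are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_ELEMENT_ALIGNMENT 64   /* illustrative, PQI has its own constant */

/* Round a pointer up to the next multiple of 'align' (align is a power of two). */
static void *ptr_align(void *p, uintptr_t align)
{
    return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
    size_t iq_bytes = 1000, oq_bytes = 500;
    /* One big allocation, as dma_alloc_coherent() would return. */
    size_t total = iq_bytes + oq_bytes + 2 * QUEUE_ELEMENT_ALIGNMENT;
    uint8_t *base = malloc(total);
    uint64_t base_bus_addr = 0x10000000ULL;   /* pretend DMA handle */
    uint8_t *p;
    void *iq_array, *oq_array;
    uint64_t iq_bus, oq_bus;

    if (!base)
        return 1;

    /* Carve the inbound element array, aligned, then step past it. */
    p = ptr_align(base, QUEUE_ELEMENT_ALIGNMENT);
    iq_array = p;
    iq_bus = base_bus_addr + (uint64_t)((uint8_t *)iq_array - base);
    p += iq_bytes;

    /* Carve the outbound element array right behind it, re-aligned. */
    p = ptr_align(p, QUEUE_ELEMENT_ALIGNMENT);
    oq_array = p;
    oq_bus = base_bus_addr + (uint64_t)((uint8_t *)oq_array - base);

    printf("iq at +%td (bus 0x%llx), oq at +%td (bus 0x%llx)\n",
           (uint8_t *)iq_array - base, (unsigned long long)iq_bus,
           (uint8_t *)oq_array - base, (unsigned long long)oq_bus);

    free(base);
    return 0;
}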
4253 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_init_operational_queues() argument
4263 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4264 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
4271 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
4272 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4273 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
4274 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
4275 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
4282 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
4283 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4284 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
4286 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4287 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
4288 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
4289 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
4290 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
4294 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_admin_queues() argument
4303 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
4304 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
4305 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
4308 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
4311 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
4313 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
4314 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
4326 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4328 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4330 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4332 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4334 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4336 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4338 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4340 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4348 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_admin_queues() argument
4356 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
4357 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
4391 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
4394 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
4401 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request() argument
4408 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
4428 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, in pqi_poll_for_admin_response() argument
4436 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
4446 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
4450 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_admin_response()
4465 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, in pqi_start_io() argument
4504 ctrl_info->num_elements_per_iq)) in pqi_start_io()
4514 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
4528 ctrl_info->num_elements_per_iq; in pqi_start_io()
4547 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_completion_io() argument
4559 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_completion_io()
4560 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_completion_io()
4605 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, in pqi_submit_raid_request_synchronous() argument
4615 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4618 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4621 pqi_ctrl_busy(ctrl_info); in pqi_submit_raid_request_synchronous()
4627 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_submit_raid_request_synchronous()
4629 if (pqi_ctrl_offline(ctrl_info)) { in pqi_submit_raid_request_synchronous()
4634 io_request = pqi_alloc_io_request(ctrl_info, NULL); in pqi_submit_raid_request_synchronous()
4650 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4653 pqi_wait_for_completion_io(ctrl_info, &wait); in pqi_submit_raid_request_synchronous()
4667 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4668 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4693 struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request_synchronous() argument
4699 pqi_submit_admin_request(ctrl_info, request); in pqi_submit_admin_request_synchronous()
4701 rc = pqi_poll_for_admin_response(ctrl_info, response); in pqi_submit_admin_request_synchronous()
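Lines 4401-4701 cover request submission; the synchronous RAID path in particular (around 4605-4668) reserves a slot on sync_request_sem, marks the controller busy, waits out any block, fires the request, and sleeps on a completion until the response handler wakes it. A hedged userspace sketch of that reserve/submit/wait-for-completion shape, using a POSIX semaphore and a hand-rolled completion; the "firmware" thread here just completes immediately, and every name is illustrative.

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

/* Completion object standing in for the kernel's struct completion. */
struct completion {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool done;
};

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = true;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static sem_t sync_request_sem;   /* limits concurrent synchronous requests */

static void *firmware(void *arg)
{
    complete(arg);               /* pretend the controller finished the request */
    return NULL;
}

/* Shape of pqi_submit_raid_request_synchronous(): reserve a slot, submit,
 * block until the response handler signals completion, release the slot. */
static int submit_request_synchronous(void)
{
    struct completion done;
    pthread_t fw;

    pthread_mutex_init(&done.lock, NULL);
    pthread_cond_init(&done.cond, NULL);
    done.done = false;

    sem_wait(&sync_request_sem);       /* down(&ctrl_info->sync_request_sem) */
    pthread_create(&fw, NULL, firmware, &done);
    wait_for_completion(&done);
    pthread_join(fw, NULL);
    sem_post(&sync_request_sem);       /* up(&ctrl_info->sync_request_sem)   */

    pthread_cond_destroy(&done.cond);
    pthread_mutex_destroy(&done.lock);
    return 0;
}

int main(void)
{
    sem_init(&sync_request_sem, 0, 3);   /* e.g. three reserved sync slots */
    printf("rc=%d\n", submit_request_synchronous());
    sem_destroy(&sync_request_sem);
    return 0;
}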
4709 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_report_device_capability() argument
4731 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4738 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); in pqi_report_device_capability()
4740 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4752 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4754 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4756 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4759 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4761 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4763 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4770 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4773 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4775 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4784 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_validate_device_capability() argument
4786 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4788 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4790 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4795 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4797 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4799 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4804 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4806 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4808 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4813 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4814 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4819 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4820 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4828 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) in pqi_create_event_queue() argument
4835 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4860 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_event_queue()
4865 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4873 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, in pqi_create_queue_group() argument
4881 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4899 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4905 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4908 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4913 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4934 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4940 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4943 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4948 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4968 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4971 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4990 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
4998 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
5001 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
5006 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
5014 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_queues() argument
5019 rc = pqi_create_event_queue(ctrl_info); in pqi_create_queues()
5021 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5026 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
5027 rc = pqi_create_queue_group(ctrl_info, i); in pqi_create_queues()
5029 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5031 i, ctrl_info->num_queue_groups); in pqi_create_queues()
5042 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, in pqi_configure_events() argument
5065 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5072 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
5074 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5085 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
5100 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5107 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
5109 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5119 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) in pqi_enable_events() argument
5121 return pqi_configure_events(ctrl_info, true); in pqi_enable_events()
5124 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) in pqi_free_all_io_requests() argument
5131 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
5134 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
5135 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
5136 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
5138 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
5148 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
5149 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
5152 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_error_buffer() argument
5154 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
5155 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
5156 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
5158 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
5164 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_resources() argument
5173 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
5174 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
5176 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
5177 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5182 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
5183 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
5184 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
5186 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
5187 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
5190 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5200 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5214 pqi_free_all_io_requests(ctrl_info); in pqi_alloc_io_resources()
5224 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_io_resources() argument
5229 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
5230 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
5231 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
5233 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
5234 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
5237 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5240 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5248 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
5252 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
5255 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
5256 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
5259 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_queue_resources() argument
5271 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
5272 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
5276 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
5280 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
5286 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
5287 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
5292 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
5299 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
5303 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
5305 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
5306 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
5308 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
5309 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5314 ctrl_info->max_sg_per_r56_iu = in pqi_calculate_queue_resources()
5315 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5370 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_sg_list() argument
5395 ctrl_info->max_sg_per_iu, &chained); in pqi_build_raid_sg_list()
5406 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r1_sg_list() argument
5432 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_r1_sg_list()
5444 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r56_sg_list() argument
5468 ctrl_info->max_sg_per_r56_iu, &chained); in pqi_build_aio_r56_sg_list()
5480 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_sg_list() argument
5506 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_sg_list()
5529 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, in pqi_raid_submit_io() argument
5538 io_request = pqi_alloc_io_request(ctrl_info, scmd); in pqi_raid_submit_io()
5596 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_io()
5602 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); in pqi_raid_submit_io()
5608 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); in pqi_raid_submit_io()
5613 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_submit_scsi_cmd() argument
5621 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); in pqi_raid_submit_scsi_cmd()
5628 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_retry_needed() local
5643 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5644 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_bypass_retry_needed()
5666 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_scsi_cmd() argument
5674 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5679 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_io() argument
5689 io_request = pqi_alloc_io_request(ctrl_info, scmd); in pqi_aio_submit_io()
5707 if (!raid_bypass && ctrl_info->multi_lun_device_supported) in pqi_aio_submit_io()
5728 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5744 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); in pqi_aio_submit_io()
5750 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_io()
5755 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r1_write_io() argument
5764 io_request = pqi_alloc_io_request(ctrl_info, scmd); in pqi_aio_submit_r1_write_io()
5805 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); in pqi_aio_submit_r1_write_io()
5811 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r1_write_io()
5816 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r56_write_io() argument
5825 io_request = pqi_alloc_io_request(ctrl_info, scmd); in pqi_aio_submit_r56_write_io()
5872 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); in pqi_aio_submit_r56_write_io()
5878 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r56_write_io()
5883 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, in pqi_get_hw_queue() argument
5928 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, in pqi_is_parity_write_stream() argument
5939 if (!ctrl_info->enable_stream_detection) in pqi_is_parity_write_stream()
5960 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || in pqi_is_parity_write_stream()
5961 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) in pqi_is_parity_write_stream()
6006 struct pqi_ctrl_info *ctrl_info; in pqi_scsi_queue_command() local
6027 ctrl_info = shost_to_hba(shost); in pqi_scsi_queue_command()
6029 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { in pqi_scsi_queue_command()
6035 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { in pqi_scsi_queue_command()
6046 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); in pqi_scsi_queue_command()
6047 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
6053 !pqi_is_parity_write_stream(ctrl_info, scmd)) { in pqi_scsi_queue_command()
6054 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
6061 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
6064 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
6066 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
6078 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) in pqi_queued_io_count() argument
6089 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_queued_io_count()
6090 queue_group = &ctrl_info->queue_groups[i]; in pqi_queued_io_count()
6102 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) in pqi_nonempty_inbound_queue_count() argument
6113 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_nonempty_inbound_queue_count()
6114 queue_group = &ctrl_info->queue_groups[i]; in pqi_nonempty_inbound_queue_count()
6128 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_inbound_queues_empty() argument
6141 queued_io_count = pqi_queued_io_count(ctrl_info); in pqi_wait_until_inbound_queues_empty()
6142 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); in pqi_wait_until_inbound_queues_empty()
6145 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_inbound_queues_empty()
6146 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_inbound_queues_empty()
6149 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6159 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6166 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, in pqi_fail_io_queued_for_device() argument
6178 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
6179 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
6215 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, in pqi_device_wait_for_pending_io() argument
6227 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_device_wait_for_pending_io()
6228 pqi_check_ctrl_health(ctrl_info); in pqi_device_wait_for_pending_io()
6229 if (pqi_ctrl_offline(ctrl_info)) in pqi_device_wait_for_pending_io()
6234 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6236 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6241 dev_warn(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6243 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6263 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_lun_reset_completion() argument
6279 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_lun_reset_completion()
6280 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_lun_reset_completion()
6287 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_for_lun_reset_completion()
6289 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); in pqi_wait_for_lun_reset_completion()
6297 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) in pqi_lun_reset() argument
6304 io_request = pqi_alloc_io_request(ctrl_info, NULL); in pqi_lun_reset()
6317 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) in pqi_lun_reset()
6320 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
6323 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
6326 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); in pqi_lun_reset()
6340 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device,… in pqi_lun_reset_with_retries() argument
6348 reset_rc = pqi_lun_reset(ctrl_info, device, lun); in pqi_lun_reset_with_retries()
6357 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); in pqi_lun_reset_with_retries()
6364 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) in pqi_device_reset() argument
6368 pqi_ctrl_block_requests(ctrl_info); in pqi_device_reset()
6369 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_device_reset()
6370 pqi_fail_io_queued_for_device(ctrl_info, device, lun); in pqi_device_reset()
6371 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_device_reset()
6373 pqi_ctrl_unblock_requests(ctrl_info); in pqi_device_reset()
6377 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); in pqi_device_reset()
6383 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u… in pqi_device_reset_handler() argument
6387 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6389 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6391 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); in pqi_device_reset_handler()
6393 pqi_check_ctrl_health(ctrl_info); in pqi_device_reset_handler()
6394 if (pqi_ctrl_offline(ctrl_info)) in pqi_device_reset_handler()
6397 rc = pqi_device_reset(ctrl_info, device, lun); in pqi_device_reset_handler()
6399 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6401 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, in pqi_device_reset_handler()
6404 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6412 struct pqi_ctrl_info *ctrl_info; in pqi_eh_device_reset_handler() local
6417 ctrl_info = shost_to_hba(shost); in pqi_eh_device_reset_handler()
6421 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); in pqi_eh_device_reset_handler()
6432 …pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scs… in pqi_tmf_worker()
6438 struct pqi_ctrl_info *ctrl_info; in pqi_eh_abort_handler() local
6444 ctrl_info = shost_to_hba(shost); in pqi_eh_abort_handler()
6447 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6452 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6462 tmf_work->ctrl_info = ctrl_info; in pqi_eh_abort_handler()
6471 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6484 struct pqi_ctrl_info *ctrl_info; in pqi_slave_alloc() local
6488 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
6490 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6495 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); in pqi_slave_alloc()
6506 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), in pqi_slave_alloc()
6527 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6534 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_map_queues() local
6536 if (!ctrl_info->disable_managed_interrupts) in pqi_map_queues()
6538 ctrl_info->pci_dev, 0); in pqi_map_queues()
6566 struct pqi_ctrl_info *ctrl_info; in pqi_slave_destroy() local
6571 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
6573 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6579 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6585 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6589 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6591 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6593 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6595 pqi_dev_info(ctrl_info, "removed", device); in pqi_slave_destroy()
6599 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_getpciinfo_ioctl() argument
6609 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
6709 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_passthru_ioctl() argument
6720 if (pqi_ctrl_offline(ctrl_info)) in pqi_passthru_ioctl()
6722 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) in pqi_passthru_ioctl()
6793 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6804 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6807 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_passthru_ioctl()
6811 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6856 struct pqi_ctrl_info *ctrl_info; in pqi_ioctl() local
6858 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6864 rc = pqi_scan_scsi_devices(ctrl_info); in pqi_ioctl()
6867 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); in pqi_ioctl()
6873 rc = pqi_passthru_ioctl(ctrl_info, arg); in pqi_ioctl()
6887 struct pqi_ctrl_info *ctrl_info; in pqi_firmware_version_show() local
6890 ctrl_info = shost_to_hba(shost); in pqi_firmware_version_show()
6892 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6905 struct pqi_ctrl_info *ctrl_info; in pqi_serial_number_show() local
6908 ctrl_info = shost_to_hba(shost); in pqi_serial_number_show()
6910 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6917 struct pqi_ctrl_info *ctrl_info; in pqi_model_show() local
6920 ctrl_info = shost_to_hba(shost); in pqi_model_show()
6922 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6929 struct pqi_ctrl_info *ctrl_info; in pqi_vendor_show() local
6932 ctrl_info = shost_to_hba(shost); in pqi_vendor_show()
6934 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
6991 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_show() local
6994 ctrl_info->enable_stream_detection); in pqi_host_enable_stream_detection_show()
7001 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_store() local
7010 ctrl_info->enable_stream_detection = set_stream_detection; in pqi_host_enable_stream_detection_store()
7019 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_show() local
7021 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); in pqi_host_enable_r5_writes_show()
7028 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_store() local
7037 ctrl_info->enable_r5_writes = set_r5_writes; in pqi_host_enable_r5_writes_store()
7046 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_show() local
7048 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); in pqi_host_enable_r6_writes_show()
7055 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_store() local
7064 ctrl_info->enable_r6_writes = set_r6_writes; in pqi_host_enable_r6_writes_store()
7104 struct pqi_ctrl_info *ctrl_info; in pqi_unique_id_show() local
7111 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
7113 if (pqi_ctrl_offline(ctrl_info)) in pqi_unique_id_show()
7116 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7120 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7143 struct pqi_ctrl_info *ctrl_info; in pqi_lunid_show() local
7150 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
7152 if (pqi_ctrl_offline(ctrl_info)) in pqi_lunid_show()
7155 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7159 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7165 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7175 struct pqi_ctrl_info *ctrl_info; in pqi_path_info_show() local
7188 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
7190 if (pqi_ctrl_offline(ctrl_info)) in pqi_path_info_show()
7193 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7197 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7214 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
7253 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7261 struct pqi_ctrl_info *ctrl_info; in pqi_sas_address_show() local
7268 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
7270 if (pqi_ctrl_offline(ctrl_info)) in pqi_sas_address_show()
7273 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7283 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7291 struct pqi_ctrl_info *ctrl_info; in pqi_ssd_smart_path_enabled_show() local
7297 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
7299 if (pqi_ctrl_offline(ctrl_info)) in pqi_ssd_smart_path_enabled_show()
7302 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7306 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7322 struct pqi_ctrl_info *ctrl_info; in pqi_raid_level_show() local
7329 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
7331 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_level_show()
7334 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7338 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7347 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7355 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_cnt_show() local
7362 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
7364 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_bypass_cnt_show()
7367 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7371 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7377 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7385 struct pqi_ctrl_info *ctrl_info; in pqi_sas_ncq_prio_enable_show() local
7392 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_show()
7394 if (pqi_ctrl_offline(ctrl_info)) in pqi_sas_ncq_prio_enable_show()
7397 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7401 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7407 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7416 struct pqi_ctrl_info *ctrl_info; in pqi_sas_ncq_prio_enable_store() local
7426 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_store()
7428 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7433 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7438 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7444 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7453 struct pqi_ctrl_info *ctrl_info; in pqi_numa_node_show() local
7456 ctrl_info = shost_to_hba(sdev->host); in pqi_numa_node_show()
7458 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); in pqi_numa_node_show()
7507 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_register_scsi() argument
7512 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); in pqi_register_scsi()
7514 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); in pqi_register_scsi()
7525 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
7526 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
7528 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
7530 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
7532 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
7534 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
7536 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
7538 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); in pqi_register_scsi()
7542 rc = pqi_add_sas_host(shost, ctrl_info); in pqi_register_scsi()
7544 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); in pqi_register_scsi()
7548 ctrl_info->scsi_host = shost; in pqi_register_scsi()
7560 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_unregister_scsi() argument
7564 pqi_delete_sas_host(ctrl_info); in pqi_unregister_scsi()
7566 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
7574 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_reset_completion() argument
7582 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
7591 if (!sis_is_firmware_running(ctrl_info)) { in pqi_wait_for_pqi_reset_completion()
7604 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) in pqi_reset() argument
7609 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
7610 rc = sis_pqi_reset_quiesce(ctrl_info); in pqi_reset()
7612 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7622 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
7624 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); in pqi_reset()
7626 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7632 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_serial_number() argument
7641 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); in pqi_get_ctrl_serial_number()
7645 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
7647 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
7655 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_product_details() argument
7664 rc = pqi_identify_controller(ctrl_info, identify); in pqi_get_ctrl_product_details()
7670 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7674 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7677 ctrl_info->firmware_version in pqi_get_ctrl_product_details()
7679 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
7680 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
7681 sizeof(ctrl_info->firmware_version) - in pqi_get_ctrl_product_details()
7687 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
7689 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
7691 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
7693 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
7695 dev_info(&ctrl_info->pci_dev->dev, in pqi_get_ctrl_product_details()
7696 "Firmware version: %s\n", ctrl_info->firmware_version); in pqi_get_ctrl_product_details()
7705 struct pqi_ctrl_info *ctrl_info; member
7758 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, in pqi_config_table_update() argument
7775 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_config_table_update()
7778 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, in pqi_enable_firmware_features() argument
7805 return pqi_config_table_update(ctrl_info, in pqi_enable_firmware_features()
7815 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7819 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_status() argument
7823 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
7829 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
7834 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
7838 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, in pqi_ctrl_update_feature_flags() argument
7843 ctrl_info->enable_r1_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7846 ctrl_info->enable_r5_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7849 ctrl_info->enable_r6_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7852 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
7854 pqi_read_soft_reset_status(ctrl_info); in pqi_ctrl_update_feature_flags()
7857 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7860 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7863 ctrl_info->firmware_triage_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7864 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); in pqi_ctrl_update_feature_flags()
7867 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7870 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7874 pqi_firmware_feature_status(ctrl_info, firmware_feature); in pqi_ctrl_update_feature_flags()
7877 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_update() argument
7881 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
7983 struct pqi_ctrl_info *ctrl_info; in pqi_process_firmware_features() local
7989 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
8000 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
8015 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, in pqi_process_firmware_features()
8018 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
8023 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
8037 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
8066 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_reset_config() argument
8068 ctrl_info->heartbeat_counter = NULL; in pqi_ctrl_reset_config()
8069 ctrl_info->soft_reset_status = NULL; in pqi_ctrl_reset_config()
8070 ctrl_info->soft_reset_handshake_supported = false; in pqi_ctrl_reset_config()
8071 ctrl_info->enable_r1_writes = false; in pqi_ctrl_reset_config()
8072 ctrl_info->enable_r5_writes = false; in pqi_ctrl_reset_config()
8073 ctrl_info->enable_r6_writes = false; in pqi_ctrl_reset_config()
8074 ctrl_info->raid_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8075 ctrl_info->tmf_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8076 ctrl_info->firmware_triage_supported = false; in pqi_ctrl_reset_config()
8077 ctrl_info->rpl_extended_format_4_5_supported = false; in pqi_ctrl_reset_config()
8078 ctrl_info->multi_lun_device_supported = false; in pqi_ctrl_reset_config()
8081 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) in pqi_process_config_table() argument
8092 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
8098 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8107 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; in pqi_process_config_table()
8111 section_info.ctrl_info = ctrl_info; in pqi_process_config_table()
8128 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8131 ctrl_info->heartbeat_counter = in pqi_process_config_table()
8138 ctrl_info->soft_reset_status = in pqi_process_config_table()
8164 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_revert_to_sis_mode() argument
8168 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); in pqi_revert_to_sis_mode()
8169 rc = pqi_reset(ctrl_info); in pqi_revert_to_sis_mode()
8172 rc = sis_reenable_sis_mode(ctrl_info); in pqi_revert_to_sis_mode()
8174 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
8178 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_revert_to_sis_mode()
8188 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_force_sis_mode() argument
8190 if (!sis_is_firmware_running(ctrl_info)) in pqi_force_sis_mode()
8193 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) in pqi_force_sis_mode()
8196 if (sis_is_kernel_up(ctrl_info)) { in pqi_force_sis_mode()
8197 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_force_sis_mode()
8201 return pqi_revert_to_sis_mode(ctrl_info); in pqi_force_sis_mode()
8219 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init() argument
8225 if (pqi_is_fw_triage_supported(ctrl_info)) { in pqi_ctrl_init()
8226 rc = sis_wait_for_fw_triage_completion(ctrl_info); in pqi_ctrl_init()
8230 sis_soft_reset(ctrl_info); in pqi_ctrl_init()
8233 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init()
8242 rc = sis_wait_for_ctrl_ready(ctrl_info); in pqi_ctrl_init()
8245 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8257 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init()
8259 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8264 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init()
8266 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8271 product_id = sis_get_product_id(ctrl_info); in pqi_ctrl_init()
8272 ctrl_info->product_id = (u8)product_id; in pqi_ctrl_init()
8273 ctrl_info->product_revision = (u8)(product_id >> 8); in pqi_ctrl_init()
8276 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8278 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8281 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8283 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8287 pqi_calculate_io_resources(ctrl_info); in pqi_ctrl_init()
8289 rc = pqi_alloc_error_buffer(ctrl_info); in pqi_ctrl_init()
8291 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8301 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init()
8303 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8309 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init()
8311 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8317 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
8318 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init()
8320 rc = pqi_alloc_admin_queues(ctrl_info); in pqi_ctrl_init()
8322 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8327 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init()
8329 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8334 rc = pqi_report_device_capability(ctrl_info); in pqi_ctrl_init()
8336 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8341 rc = pqi_validate_device_capability(ctrl_info); in pqi_ctrl_init()
8345 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
8347 rc = pqi_enable_msix_interrupts(ctrl_info); in pqi_ctrl_init()
8351 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
8352 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
8353 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
8354 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
8357 rc = pqi_alloc_io_resources(ctrl_info); in pqi_ctrl_init()
8361 rc = pqi_alloc_operational_queues(ctrl_info); in pqi_ctrl_init()
8363 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8368 pqi_init_operational_queues(ctrl_info); in pqi_ctrl_init()
8370 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init()
8374 rc = pqi_request_irqs(ctrl_info); in pqi_ctrl_init()
8378 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init()
8380 ctrl_info->controller_online = true; in pqi_ctrl_init()
8382 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init()
8386 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init()
8388 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init()
8389 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init()
8391 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8395 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init()
8399 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init()
8401 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8407 rc = pqi_register_scsi(ctrl_info); in pqi_ctrl_init()
8411 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init()
8413 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8418 rc = pqi_get_ctrl_serial_number(ctrl_info); in pqi_ctrl_init()
8420 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8425 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init()
8427 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8432 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init()
8434 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8439 pqi_schedule_update_time_worker(ctrl_info); in pqi_ctrl_init()
8441 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init()
8446 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) in pqi_reinit_queues() argument
8452 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
8457 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
8458 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
8459 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
8460 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
8462 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
8463 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
8464 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
8467 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
8472 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init_resume() argument
8476 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init_resume()
8484 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); in pqi_ctrl_init_resume()
8492 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init_resume()
8494 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8499 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init_resume()
8501 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8511 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init_resume()
8513 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8519 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init_resume()
8521 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8527 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
8528 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init_resume()
8530 pqi_reinit_queues(ctrl_info); in pqi_ctrl_init_resume()
8532 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init_resume()
8534 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8539 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init_resume()
8543 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init_resume()
8545 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
8546 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ctrl_init_resume()
8548 pqi_ctrl_reset_config(ctrl_info); in pqi_ctrl_init_resume()
8550 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init_resume()
8554 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init_resume()
8556 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init_resume()
8557 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init_resume()
8559 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8563 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init_resume()
8567 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init_resume()
8569 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8574 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init_resume()
8576 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8581 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init_resume()
8583 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8588 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init_resume()
8590 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8595 if (pqi_ofa_in_progress(ctrl_info)) in pqi_ctrl_init_resume()
8596 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ctrl_init_resume()
8598 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init_resume()
8613 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_pci_init() argument
8618 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
8620 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8630 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
8632 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
8636 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
8638 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8643 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
8644 ctrl_info->pci_dev, 0), in pqi_pci_init()
8645 pci_resource_len(ctrl_info->pci_dev, 0)); in pqi_pci_init()
8646 if (!ctrl_info->iomem_base) { in pqi_pci_init()
8647 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8656 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
8659 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8665 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
8667 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
8668 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
8670 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
8675 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
8677 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
8682 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_cleanup_pci_init() argument
8684 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
8685 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8686 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
8687 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8688 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
8693 struct pqi_ctrl_info *ctrl_info; in pqi_alloc_ctrl_info() local
8695 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), in pqi_alloc_ctrl_info()
8697 if (!ctrl_info) in pqi_alloc_ctrl_info()
8700 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
8701 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
8702 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
8704 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
8705 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
8707 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
8708 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
8710 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
8711 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
8713 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
8714 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
8716 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); in pqi_alloc_ctrl_info()
8717 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); in pqi_alloc_ctrl_info()
8719 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
8721 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
8723 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
8724 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
8725 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
8727 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; in pqi_alloc_ctrl_info()
8728 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_alloc_ctrl_info()
8730 ctrl_info->max_transfer_encrypted_nvme = in pqi_alloc_ctrl_info()
8732 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; in pqi_alloc_ctrl_info()
8733 ctrl_info->max_write_raid_1_10_2drive = ~0; in pqi_alloc_ctrl_info()
8734 ctrl_info->max_write_raid_1_10_3drive = ~0; in pqi_alloc_ctrl_info()
8735 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; in pqi_alloc_ctrl_info()
8737 return ctrl_info; in pqi_alloc_ctrl_info()
8740 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_info() argument
8742 kfree(ctrl_info); in pqi_free_ctrl_info()
8745 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_free_interrupts() argument
8747 pqi_free_irqs(ctrl_info); in pqi_free_interrupts()
8748 pqi_disable_msix_interrupts(ctrl_info); in pqi_free_interrupts()
8751 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_resources() argument
8753 pqi_free_interrupts(ctrl_info); in pqi_free_ctrl_resources()
8754 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
8755 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8756 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
8757 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
8758 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8759 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
8760 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8761 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
8762 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
8763 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8764 pqi_free_all_io_requests(ctrl_info); in pqi_free_ctrl_resources()
8765 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
8766 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8767 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
8768 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
8769 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
8770 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
8771 pqi_cleanup_pci_init(ctrl_info); in pqi_free_ctrl_resources()
8772 pqi_free_ctrl_info(ctrl_info); in pqi_free_ctrl_resources()
8775 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) in pqi_remove_ctrl() argument
8777 ctrl_info->controller_online = false; in pqi_remove_ctrl()
8778 pqi_stop_heartbeat_timer(ctrl_info); in pqi_remove_ctrl()
8779 pqi_ctrl_block_requests(ctrl_info); in pqi_remove_ctrl()
8780 pqi_cancel_rescan_worker(ctrl_info); in pqi_remove_ctrl()
8781 pqi_cancel_update_time_worker(ctrl_info); in pqi_remove_ctrl()
8782 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { in pqi_remove_ctrl()
8783 pqi_fail_all_outstanding_requests(ctrl_info); in pqi_remove_ctrl()
8784 ctrl_info->pqi_mode_enabled = false; in pqi_remove_ctrl()
8786 pqi_unregister_scsi(ctrl_info); in pqi_remove_ctrl()
8787 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
8788 pqi_revert_to_sis_mode(ctrl_info); in pqi_remove_ctrl()
8789 pqi_free_ctrl_resources(ctrl_info); in pqi_remove_ctrl()
8792 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_quiesce() argument
8794 pqi_ctrl_block_scan(ctrl_info); in pqi_ofa_ctrl_quiesce()
8795 pqi_scsi_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8796 pqi_ctrl_block_device_reset(ctrl_info); in pqi_ofa_ctrl_quiesce()
8797 pqi_ctrl_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8798 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_ofa_ctrl_quiesce()
8799 pqi_stop_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_quiesce()
8802 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_unquiesce() argument
8804 pqi_start_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8805 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8806 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8807 pqi_scsi_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8808 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8811 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) in pqi_ofa_alloc_mem() argument
8820 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_alloc_mem()
8826 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); in pqi_ofa_alloc_mem()
8827 if (!ctrl_info->pqi_ofa_chunk_virt_addr) in pqi_ofa_alloc_mem()
8830 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_alloc_mem()
8833 ctrl_info->pqi_ofa_chunk_virt_addr[i] = in pqi_ofa_alloc_mem()
8835 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) in pqi_ofa_alloc_mem()
8852 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_alloc_mem()
8855 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_alloc_mem()
8861 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_alloc_host_buffer() argument
8867 if (ctrl_info->ofa_bytes_requested == 0) in pqi_ofa_alloc_host_buffer()
8870 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); in pqi_ofa_alloc_host_buffer()
8875 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) in pqi_ofa_alloc_host_buffer()
8884 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_setup_host_buffer() argument
8889 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_setup_host_buffer()
8892 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); in pqi_ofa_setup_host_buffer()
8896 ctrl_info->pqi_ofa_mem_virt_addr = ofap; in pqi_ofa_setup_host_buffer()
8898 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { in pqi_ofa_setup_host_buffer()
8901 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_setup_host_buffer()
8902 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_setup_host_buffer()
8910 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_free_host_buffer() argument
8918 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_free_host_buffer()
8922 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_free_host_buffer()
8934 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_free_host_buffer()
8937 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_free_host_buffer()
8941 ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_free_host_buffer()
8942 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_free_host_buffer()
8945 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_host_memory_update() argument
8959 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_host_memory_update()
8966 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_host_memory_update()
8972 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_ofa_host_memory_update()
8975 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) in pqi_ofa_ctrl_restart() argument
8979 return pqi_ctrl_init_resume(ctrl_info); in pqi_ofa_ctrl_restart()
8987 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) in pqi_fail_all_outstanding_requests() argument
8994 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
8995 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
9019 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline_deferred() argument
9022 pqi_stop_heartbeat_timer(ctrl_info); in pqi_take_ctrl_offline_deferred()
9023 pqi_free_interrupts(ctrl_info); in pqi_take_ctrl_offline_deferred()
9024 pqi_cancel_rescan_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
9025 pqi_cancel_update_time_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
9026 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_take_ctrl_offline_deferred()
9027 pqi_fail_all_outstanding_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
9028 pqi_ctrl_unblock_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
9033 struct pqi_ctrl_info *ctrl_info; in pqi_ctrl_offline_worker() local
9035 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); in pqi_ctrl_offline_worker()
9036 pqi_take_ctrl_offline_deferred(ctrl_info); in pqi_ctrl_offline_worker()
9085 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, in pqi_take_ctrl_offline() argument
9088 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
9091 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
9092 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
9093 pqi_ctrl_block_requests(ctrl_info); in pqi_take_ctrl_offline()
9095 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); in pqi_take_ctrl_offline()
9096 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
9097 dev_err(&ctrl_info->pci_dev->dev, in pqi_take_ctrl_offline()
9100 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
9121 struct pqi_ctrl_info *ctrl_info; in pqi_pci_probe() local
9145 ctrl_info = pqi_alloc_ctrl_info(node); in pqi_pci_probe()
9146 if (!ctrl_info) { in pqi_pci_probe()
9151 ctrl_info->numa_node = node; in pqi_pci_probe()
9153 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
9155 rc = pqi_pci_init(ctrl_info); in pqi_pci_probe()
9159 rc = pqi_ctrl_init(ctrl_info); in pqi_pci_probe()
9166 pqi_remove_ctrl(ctrl_info); in pqi_pci_probe()
9173 struct pqi_ctrl_info *ctrl_info; in pqi_pci_remove() local
9177 ctrl_info = pci_get_drvdata(pci_dev); in pqi_pci_remove()
9178 if (!ctrl_info) in pqi_pci_remove()
9181 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); in pqi_pci_remove()
9183 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; in pqi_pci_remove()
9185 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; in pqi_pci_remove()
9187 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_pci_remove()
9188 rc = pqi_flush_cache(ctrl_info, RESTART); in pqi_pci_remove()
9194 pqi_remove_ctrl(ctrl_info); in pqi_pci_remove()
9197 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) in pqi_crash_if_pending_command() argument
9203 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
9204 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
9216 struct pqi_ctrl_info *ctrl_info; in pqi_shutdown() local
9219 ctrl_info = pci_get_drvdata(pci_dev); in pqi_shutdown()
9220 if (!ctrl_info) { in pqi_shutdown()
9226 pqi_wait_until_ofa_finished(ctrl_info); in pqi_shutdown()
9228 pqi_scsi_block_requests(ctrl_info); in pqi_shutdown()
9229 pqi_ctrl_block_device_reset(ctrl_info); in pqi_shutdown()
9230 pqi_ctrl_block_requests(ctrl_info); in pqi_shutdown()
9231 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_shutdown()
9242 rc = pqi_flush_cache(ctrl_info, shutdown_event); in pqi_shutdown()
9247 pqi_crash_if_pending_command(ctrl_info); in pqi_shutdown()
9248 pqi_reset(ctrl_info); in pqi_shutdown()
9310 struct pqi_ctrl_info *ctrl_info; in pqi_suspend_or_freeze() local
9313 ctrl_info = pci_get_drvdata(pci_dev); in pqi_suspend_or_freeze()
9315 pqi_wait_until_ofa_finished(ctrl_info); in pqi_suspend_or_freeze()
9317 pqi_ctrl_block_scan(ctrl_info); in pqi_suspend_or_freeze()
9318 pqi_scsi_block_requests(ctrl_info); in pqi_suspend_or_freeze()
9319 pqi_ctrl_block_device_reset(ctrl_info); in pqi_suspend_or_freeze()
9320 pqi_ctrl_block_requests(ctrl_info); in pqi_suspend_or_freeze()
9321 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_suspend_or_freeze()
9327 pqi_flush_cache(ctrl_info, shutdown_event); in pqi_suspend_or_freeze()
9330 pqi_stop_heartbeat_timer(ctrl_info); in pqi_suspend_or_freeze()
9331 pqi_crash_if_pending_command(ctrl_info); in pqi_suspend_or_freeze()
9332 pqi_free_irqs(ctrl_info); in pqi_suspend_or_freeze()
9334 ctrl_info->controller_online = false; in pqi_suspend_or_freeze()
9335 ctrl_info->pqi_mode_enabled = false; in pqi_suspend_or_freeze()
9349 struct pqi_ctrl_info *ctrl_info; in pqi_resume_or_restore() local
9352 ctrl_info = pci_get_drvdata(pci_dev); in pqi_resume_or_restore()
9354 rc = pqi_request_irqs(ctrl_info); in pqi_resume_or_restore()
9358 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_resume_or_restore()
9359 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume_or_restore()
9360 pqi_scsi_unblock_requests(ctrl_info); in pqi_resume_or_restore()
9361 pqi_ctrl_unblock_scan(ctrl_info); in pqi_resume_or_restore()
9365 return pqi_ctrl_init_resume(ctrl_info); in pqi_resume_or_restore()
9377 struct pqi_ctrl_info *ctrl_info; in pqi_thaw() local
9380 ctrl_info = pci_get_drvdata(pci_dev); in pqi_thaw()
9382 rc = pqi_request_irqs(ctrl_info); in pqi_thaw()
9386 ctrl_info->controller_online = true; in pqi_thaw()
9387 ctrl_info->pqi_mode_enabled = true; in pqi_thaw()
9389 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_thaw()
9390 pqi_ctrl_unblock_requests(ctrl_info); in pqi_thaw()
9391 pqi_scsi_unblock_requests(ctrl_info); in pqi_thaw()
9392 pqi_ctrl_unblock_scan(ctrl_info); in pqi_thaw()
9400 struct pqi_ctrl_info *ctrl_info; in pqi_poweroff() local
9404 ctrl_info = pci_get_drvdata(pci_dev); in pqi_poweroff()
9407 pqi_flush_cache(ctrl_info, shutdown_event); in pqi_poweroff()