Lines matching refs: ctrl_info
57 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
59 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
61 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
64 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
67 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
71 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
75 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
79 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
80 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
81 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
82 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
83 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
84 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
85 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
220 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_offline() argument
222 return !ctrl_info->controller_online; in pqi_ctrl_offline()
225 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) in pqi_check_ctrl_health() argument
227 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
228 if (!sis_is_firmware_running(ctrl_info)) in pqi_check_ctrl_health()
229 pqi_take_ctrl_offline(ctrl_info); in pqi_check_ctrl_health()
240 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_mode() argument
242 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE; in pqi_get_ctrl_mode()
245 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, in pqi_save_ctrl_mode() argument
250 driver_scratch = sis_read_driver_scratch(ctrl_info); in pqi_save_ctrl_mode()
257 sis_write_driver_scratch(ctrl_info, driver_scratch); in pqi_save_ctrl_mode()
260 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) in pqi_is_fw_triage_supported() argument
262 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; in pqi_is_fw_triage_supported()
265 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) in pqi_save_fw_triage_setting() argument
269 driver_scratch = sis_read_driver_scratch(ctrl_info); in pqi_save_fw_triage_setting()
276 sis_write_driver_scratch(ctrl_info, driver_scratch); in pqi_save_fw_triage_setting()
279 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_scan() argument
281 ctrl_info->scan_blocked = true; in pqi_ctrl_block_scan()
282 mutex_lock(&ctrl_info->scan_mutex); in pqi_ctrl_block_scan()
285 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_scan() argument
287 ctrl_info->scan_blocked = false; in pqi_ctrl_unblock_scan()
288 mutex_unlock(&ctrl_info->scan_mutex); in pqi_ctrl_unblock_scan()
291 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_scan_blocked() argument
293 return ctrl_info->scan_blocked; in pqi_ctrl_scan_blocked()
296 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_device_reset() argument
298 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_block_device_reset()
301 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_device_reset() argument
303 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_unblock_device_reset()
306 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_block_requests() argument
312 shost = ctrl_info->scsi_host; in pqi_scsi_block_requests()
326 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_scsi_unblock_requests() argument
328 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_scsi_unblock_requests()
331 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_busy() argument
333 atomic_inc(&ctrl_info->num_busy_threads); in pqi_ctrl_busy()
336 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unbusy() argument
338 atomic_dec(&ctrl_info->num_busy_threads); in pqi_ctrl_unbusy()
341 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_blocked() argument
343 return ctrl_info->block_requests; in pqi_ctrl_blocked()
346 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_block_requests() argument
348 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
351 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_unblock_requests() argument
353 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
354 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
357 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) in pqi_wait_if_ctrl_blocked() argument
359 if (!pqi_ctrl_blocked(ctrl_info)) in pqi_wait_if_ctrl_blocked()
362 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
363 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
364 !pqi_ctrl_blocked(ctrl_info)); in pqi_wait_if_ctrl_blocked()
365 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
370 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_wait_until_quiesced() argument
380 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
381 atomic_read(&ctrl_info->num_blocked_threads)) { in pqi_ctrl_wait_until_quiesced()
383 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
393 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
403 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_start() argument
405 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_start()
408 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_ofa_done() argument
410 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_done()
413 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_ofa_finished() argument
415 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
416 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
419 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_in_progress() argument
421 return mutex_is_locked(&ctrl_info->ofa_mutex); in pqi_ofa_in_progress()
450 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, in pqi_schedule_rescan_worker_with_delay() argument
453 if (pqi_ctrl_offline(ctrl_info)) in pqi_schedule_rescan_worker_with_delay()
456 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
459 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker() argument
461 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); in pqi_schedule_rescan_worker()
466 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_rescan_worker_delayed() argument
468 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); in pqi_schedule_rescan_worker_delayed()
471 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_rescan_worker() argument
473 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
476 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) in pqi_read_heartbeat_counter() argument
478 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
481 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
484 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_read_soft_reset_status() argument
486 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
489 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) in pqi_clear_soft_reset_status() argument
493 status = pqi_read_soft_reset_status(ctrl_info); in pqi_clear_soft_reset_status()
495 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
535 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_path_request() argument
573 cdb[1] = ctrl_info->ciss_report_log_flags; in pqi_build_raid_path_request()
618 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); in pqi_build_raid_path_request()
637 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
650 struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_request() argument
653 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ in pqi_alloc_io_request()
656 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
660 i = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
664 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; in pqi_alloc_io_request()
676 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, in pqi_send_scsi_raid_request() argument
684 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, in pqi_send_scsi_raid_request()
689 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); in pqi_send_scsi_raid_request()
691 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
698 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_request() argument
701 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_request()
705 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, in pqi_send_ctrl_raid_with_error() argument
709 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, in pqi_send_ctrl_raid_with_error()
713 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, in pqi_identify_controller() argument
716 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, in pqi_identify_controller()
720 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, in pqi_sense_subsystem_info() argument
723 return pqi_send_ctrl_raid_request(ctrl_info, in pqi_sense_subsystem_info()
728 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_inquiry() argument
731 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, in pqi_scsi_inquiry()
735 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, in pqi_identify_physical_device() argument
744 rc = pqi_build_raid_path_request(ctrl_info, &request, in pqi_identify_physical_device()
754 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_identify_physical_device()
756 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
792 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) in pqi_get_advanced_raid_bypass_config() argument
803 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, in pqi_get_advanced_raid_bypass_config()
811 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_get_advanced_raid_bypass_config()
813 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_get_advanced_raid_bypass_config()
832 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_get_advanced_raid_bypass_config()
836 ctrl_info->max_transfer_encrypted_nvme = in pqi_get_advanced_raid_bypass_config()
840 ctrl_info->max_write_raid_5_6 = in pqi_get_advanced_raid_bypass_config()
844 ctrl_info->max_write_raid_1_10_2drive = in pqi_get_advanced_raid_bypass_config()
848 ctrl_info->max_write_raid_1_10_3drive = in pqi_get_advanced_raid_bypass_config()
858 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, in pqi_flush_cache() argument
870 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, in pqi_flush_cache()
878 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, in pqi_csmi_smp_passthru() argument
882 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, in pqi_csmi_smp_passthru()
888 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) in pqi_set_diag_rescan() argument
897 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, in pqi_set_diag_rescan()
904 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, in pqi_set_diag_rescan()
913 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, in pqi_write_host_wellness() argument
916 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, in pqi_write_host_wellness()
934 struct pqi_ctrl_info *ctrl_info) in pqi_write_driver_version_to_host_wellness() argument
962 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_driver_version_to_host_wellness()
983 struct pqi_ctrl_info *ctrl_info) in pqi_write_current_time_to_host_wellness() argument
1025 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); in pqi_write_current_time_to_host_wellness()
1037 struct pqi_ctrl_info *ctrl_info; in pqi_update_time_worker() local
1039 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_update_time_worker()
1042 rc = pqi_write_current_time_to_host_wellness(ctrl_info); in pqi_update_time_worker()
1044 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
1047 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
1051 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_schedule_update_time_worker() argument
1053 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
1056 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) in pqi_cancel_update_time_worker() argument
1058 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
1061 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, in pqi_report_luns() argument
1064 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); in pqi_report_luns()
1067 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) in pqi_report_phys_logical_luns() argument
1082 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); in pqi_report_phys_logical_luns()
1102 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); in pqi_report_phys_logical_luns()
1128 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_phys_luns() argument
1130 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer); in pqi_report_phys_luns()
1133 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) in pqi_report_logical_luns() argument
1135 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); in pqi_report_logical_luns()
1138 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_lists() argument
1149 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); in pqi_get_device_lists()
1151 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1154 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); in pqi_get_device_lists()
1156 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1247 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_level() argument
1258 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1271 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_validate_raid_map() argument
1311 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1319 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_map() argument
1330 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1345 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, in pqi_get_raid_map()
1352 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1361 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); in pqi_get_raid_map()
1375 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, in pqi_set_max_transfer_encrypted() argument
1378 if (!ctrl_info->lv_drive_type_mix_valid) { in pqi_set_max_transfer_encrypted()
1392 ctrl_info->max_transfer_encrypted_sas_sata; in pqi_set_max_transfer_encrypted()
1396 ctrl_info->max_transfer_encrypted_nvme; in pqi_set_max_transfer_encrypted()
1402 min(ctrl_info->max_transfer_encrypted_sas_sata, in pqi_set_max_transfer_encrypted()
1403 ctrl_info->max_transfer_encrypted_nvme); in pqi_set_max_transfer_encrypted()
1408 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_raid_bypass_status() argument
1419 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1433 pqi_get_raid_map(ctrl_info, device) == 0) { in pqi_get_raid_bypass_status()
1437 pqi_set_max_transfer_encrypted(ctrl_info, device); in pqi_get_raid_bypass_status()
1448 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_get_volume_status() argument
1462 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1488 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_physical_device_info() argument
1496 rc = pqi_identify_physical_device(ctrl_info, device, in pqi_get_physical_device_info()
1537 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_logical_device_info() argument
1548 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1565 pqi_get_raid_level(ctrl_info, device); in pqi_get_logical_device_info()
1566 pqi_get_raid_bypass_status(ctrl_info, device); in pqi_get_logical_device_info()
1567 pqi_get_volume_status(ctrl_info, device); in pqi_get_logical_device_info()
1577 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, in pqi_get_device_info() argument
1587 rc = pqi_get_logical_device_info(ctrl_info, device); in pqi_get_device_info()
1589 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); in pqi_get_device_info()
1594 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, in pqi_show_volume_status() argument
1691 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1693 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1699 struct pqi_ctrl_info *ctrl_info; in pqi_rescan_worker() local
1701 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, in pqi_rescan_worker()
1704 pqi_scan_scsi_devices(ctrl_info); in pqi_rescan_worker()
1707 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, in pqi_add_device() argument
1713 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1716 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1723 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_remove_device() argument
1729 rc = pqi_device_wait_for_pending_io(ctrl_info, device, in pqi_remove_device()
1732 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1734 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1746 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, in pqi_find_scsi_dev() argument
1751 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1775 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, in pqi_scsi_find_entry() argument
1780 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1805 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, in pqi_dev_info() argument
1812 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
1861 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
1936 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, in pqi_fixup_botched_add() argument
1941 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1943 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
1957 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, in pqi_update_device_list() argument
1977 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
1980 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
1986 find_result = pqi_scsi_find_entry(ctrl_info, device, in pqi_update_device_list()
2017 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
2033 &ctrl_info->scsi_device_list); in pqi_update_device_list()
2039 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2046 if (pqi_ofa_in_progress(ctrl_info)) { in pqi_update_device_list()
2050 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_update_device_list()
2051 pqi_scsi_unblock_requests(ctrl_info); in pqi_update_device_list()
2057 pqi_dev_info(ctrl_info, "offline", device); in pqi_update_device_list()
2058 pqi_show_volume_status(ctrl_info, device); in pqi_update_device_list()
2062 pqi_remove_device(ctrl_info, device); in pqi_update_device_list()
2065 pqi_dev_info(ctrl_info, "removed", device); in pqi_update_device_list()
2074 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_update_device_list()
2088 rc = pqi_add_device(ctrl_info, device); in pqi_update_device_list()
2090 pqi_dev_info(ctrl_info, "added", device); in pqi_update_device_list()
2092 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
2094 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
2097 pqi_fixup_botched_add(ctrl_info, device); in pqi_update_device_list()
2149 static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info, in pqi_set_physical_device_wwid() argument
2152 if (ctrl_info->unique_wwid_in_report_phys_lun_supported || in pqi_set_physical_device_wwid()
2159 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_update_scsi_devices() argument
2183 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); in pqi_update_scsi_devices()
2210 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2230 ctrl_info->lv_drive_type_mix_valid = true; in pqi_update_scsi_devices()
2238 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2246 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2301 rc = pqi_get_device_info(ctrl_info, device, id_phys); in pqi_update_scsi_devices()
2303 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2309 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2313 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2324 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry); in pqi_update_scsi_devices()
2343 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); in pqi_update_scsi_devices()
2362 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) in pqi_scan_scsi_devices() argument
2367 if (pqi_ctrl_offline(ctrl_info)) in pqi_scan_scsi_devices()
2370 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2373 if (pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2375 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2379 rc = pqi_update_scsi_devices(ctrl_info); in pqi_scan_scsi_devices()
2380 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) in pqi_scan_scsi_devices()
2381 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_scan_scsi_devices()
2383 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2390 struct pqi_ctrl_info *ctrl_info; in pqi_scan_start() local
2392 ctrl_info = shost_to_hba(shost); in pqi_scan_start()
2394 pqi_scan_scsi_devices(ctrl_info); in pqi_scan_start()
2402 struct pqi_ctrl_info *ctrl_info; in pqi_scan_finished() local
2404 ctrl_info = shost_priv(shost); in pqi_scan_finished()
2406 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
2433 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, in pqi_aio_raid_level_supported() argument
2442 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2443 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2447 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2448 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2452 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2453 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2457 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2458 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2517 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, in pci_get_aio_common_raid_map_values() argument
2751 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_bypass_submit_scsi_cmd() argument
2769 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) in pqi_raid_bypass_submit_scsi_cmd()
2777 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); in pqi_raid_bypass_submit_scsi_cmd()
2834 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
2838 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, in pqi_raid_bypass_submit_scsi_cmd()
2843 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, in pqi_raid_bypass_submit_scsi_cmd()
2862 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_mode_ready() argument
2869 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
2878 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2890 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2902 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
2923 struct pqi_ctrl_info *ctrl_info; in pqi_take_device_offline() local
2931 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
2932 pqi_schedule_rescan_worker(ctrl_info); in pqi_take_device_offline()
2933 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
2934 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
3009 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error() local
3016 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3024 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3141 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, in pqi_interpret_task_management_response() argument
3160 dev_err(&ctrl_info->pci_dev->dev, in pqi_interpret_task_management_response()
3166 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) in pqi_invalid_response() argument
3168 pqi_take_ctrl_offline(ctrl_info); in pqi_invalid_response()
3171 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) in pqi_process_io_intr() argument
3185 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
3186 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3187 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3189 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
3200 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
3201 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3202 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3204 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
3208 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
3210 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3211 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3231 io_request->status = pqi_interpret_task_management_response(ctrl_info, in pqi_process_io_intr()
3240 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
3246 pqi_invalid_response(ctrl_info); in pqi_process_io_intr()
3247 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3259 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
3283 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, in pqi_send_event_ack() argument
3292 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3302 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3308 if (pqi_ctrl_offline(ctrl_info)) in pqi_send_event_ack()
3317 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3329 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, in pqi_acknowledge_event() argument
3343 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); in pqi_acknowledge_event()
3350 struct pqi_ctrl_info *ctrl_info) in pqi_poll_for_soft_reset_status() argument
3358 status = pqi_read_soft_reset_status(ctrl_info); in pqi_poll_for_soft_reset_status()
3365 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_soft_reset_status()
3369 dev_warn(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3378 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) in pqi_process_soft_reset() argument
3384 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3385 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3396 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3398 sis_soft_reset(ctrl_info); in pqi_process_soft_reset()
3401 ctrl_info->pqi_mode_enabled = false; in pqi_process_soft_reset()
3402 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_process_soft_reset()
3403 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); in pqi_process_soft_reset()
3404 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3405 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3406 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3411 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3413 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3414 pqi_clear_soft_reset_status(ctrl_info); in pqi_process_soft_reset()
3415 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3416 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3417 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3422 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3425 pqi_ofa_free_host_buffer(ctrl_info); in pqi_process_soft_reset()
3426 pqi_ctrl_ofa_done(ctrl_info); in pqi_process_soft_reset()
3427 pqi_ofa_ctrl_unquiesce(ctrl_info); in pqi_process_soft_reset()
3428 pqi_take_ctrl_offline(ctrl_info); in pqi_process_soft_reset()
3435 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_memory_alloc_worker() local
3437 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); in pqi_ofa_memory_alloc_worker()
3439 pqi_ctrl_ofa_start(ctrl_info); in pqi_ofa_memory_alloc_worker()
3440 pqi_ofa_setup_host_buffer(ctrl_info); in pqi_ofa_memory_alloc_worker()
3441 pqi_ofa_host_memory_update(ctrl_info); in pqi_ofa_memory_alloc_worker()
3446 struct pqi_ctrl_info *ctrl_info; in pqi_ofa_quiesce_worker() local
3449 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); in pqi_ofa_quiesce_worker()
3451 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; in pqi_ofa_quiesce_worker()
3453 pqi_ofa_ctrl_quiesce(ctrl_info); in pqi_ofa_quiesce_worker()
3454 pqi_acknowledge_event(ctrl_info, event); in pqi_ofa_quiesce_worker()
3455 pqi_process_soft_reset(ctrl_info); in pqi_ofa_quiesce_worker()
3458 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_process_event() argument
3467 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3469 schedule_work(&ctrl_info->ofa_memory_alloc_work); in pqi_ofa_process_event()
3472 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3474 schedule_work(&ctrl_info->ofa_quiesce_work); in pqi_ofa_process_event()
3478 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3480 ctrl_info->ofa_cancel_reason); in pqi_ofa_process_event()
3481 pqi_ofa_free_host_buffer(ctrl_info); in pqi_ofa_process_event()
3482 pqi_ctrl_ofa_done(ctrl_info); in pqi_ofa_process_event()
3485 dev_err(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3498 struct pqi_ctrl_info *ctrl_info; in pqi_event_worker() local
3502 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); in pqi_event_worker()
3504 pqi_ctrl_busy(ctrl_info); in pqi_event_worker()
3505 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_event_worker()
3506 if (pqi_ctrl_offline(ctrl_info)) in pqi_event_worker()
3510 event = ctrl_info->events; in pqi_event_worker()
3515 ack_event = pqi_ofa_process_event(ctrl_info, event); in pqi_event_worker()
3521 pqi_acknowledge_event(ctrl_info, event); in pqi_event_worker()
3527 pqi_schedule_rescan_worker_delayed(ctrl_info); in pqi_event_worker()
3530 pqi_ctrl_unbusy(ctrl_info); in pqi_event_worker()
3539 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); in pqi_heartbeat_timer_handler() local
3541 pqi_check_ctrl_health(ctrl_info); in pqi_heartbeat_timer_handler()
3542 if (pqi_ctrl_offline(ctrl_info)) in pqi_heartbeat_timer_handler()
3545 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3546 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); in pqi_heartbeat_timer_handler()
3548 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3549 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3550 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3553 pqi_take_ctrl_offline(ctrl_info); in pqi_heartbeat_timer_handler()
3557 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3560 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3561 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
3565 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_start_heartbeat_timer() argument
3567 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3570 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3571 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3572 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3573 pqi_read_heartbeat_counter(ctrl_info); in pqi_start_heartbeat_timer()
3575 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3577 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3580 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) in pqi_stop_heartbeat_timer() argument
3582 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
3585 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, in pqi_ofa_capture_event_payload() argument
3590 ctrl_info->ofa_bytes_requested = in pqi_ofa_capture_event_payload()
3594 ctrl_info->ofa_cancel_reason = in pqi_ofa_capture_event_payload()
3600 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) in pqi_process_event_intr() argument
3610 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3617 pqi_invalid_response(ctrl_info); in pqi_process_event_intr()
3618 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3633 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3640 pqi_ofa_capture_event_payload(ctrl_info, event, response); in pqi_process_event_intr()
3649 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3657 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) in pqi_configure_legacy_intx() argument
3663 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3675 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, in pqi_change_irq_mode() argument
3678 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3684 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3685 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3694 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3695 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3700 pqi_configure_legacy_intx(ctrl_info, false); in pqi_change_irq_mode()
3707 sis_enable_msix(ctrl_info); in pqi_change_irq_mode()
3710 pqi_configure_legacy_intx(ctrl_info, true); in pqi_change_irq_mode()
3711 sis_enable_intx(ctrl_info); in pqi_change_irq_mode()
3719 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
3724 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) in pqi_is_valid_irq() argument
3729 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
3734 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
3751 struct pqi_ctrl_info *ctrl_info; in pqi_irq_handler() local
3757 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
3759 if (!pqi_is_valid_irq(ctrl_info)) in pqi_irq_handler()
3762 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); in pqi_irq_handler()
3766 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
3767 num_events_handled = pqi_process_event_intr(ctrl_info); in pqi_irq_handler()
3775 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
3777 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); in pqi_irq_handler()
3778 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); in pqi_irq_handler()
3784 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_request_irqs() argument
3786 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
3790 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
3792 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
3794 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
3801 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
3807 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) in pqi_free_irqs() argument
3811 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
3812 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
3813 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
3815 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
3818 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_enable_msix_interrupts() argument
3822 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
3823 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
3826 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
3832 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
3833 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
3837 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_disable_msix_interrupts() argument
3839 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
3840 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
3841 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
3845 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_operational_queues() argument
3861 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
3864 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
3865 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
3866 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
3867 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
3899 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
3900 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
3901 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
3904 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
3907 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
3909 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
3912 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3913 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3916 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3917 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3923 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3924 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3930 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3931 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3934 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3935 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3941 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
3942 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
3943 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3944 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3951 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
3952 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3955 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3957 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3963 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3965 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3971 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3973 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3979 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
3980 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
3981 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
3983 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
3988 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) in pqi_init_operational_queues() argument
3998 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
3999 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
4006 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
4007 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4008 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
4009 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
4010 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
4017 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
4018 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4019 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
4021 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4022 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
4023 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
4024 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
4025 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
4029 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_admin_queues() argument
4038 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
4039 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
4040 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
4043 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
4046 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
4048 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
4049 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
4061 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4063 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4065 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4067 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4069 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4071 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4073 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4075 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4083 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_admin_queues() argument
4091 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
4092 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
4126 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
4129 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
4136 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request() argument
4143 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
4163 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, in pqi_poll_for_admin_response() argument
4171 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
4181 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
4185 if (!sis_is_firmware_running(ctrl_info)) in pqi_poll_for_admin_response()
4200 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, in pqi_start_io() argument
4239 ctrl_info->num_elements_per_iq)) in pqi_start_io()
4249 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
4263 ctrl_info->num_elements_per_iq; in pqi_start_io()
4282 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_completion_io() argument
4294 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_completion_io()
4295 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_completion_io()
4340 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, in pqi_submit_raid_request_synchronous() argument
4350 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4353 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4356 pqi_ctrl_busy(ctrl_info); in pqi_submit_raid_request_synchronous()
4362 pqi_wait_if_ctrl_blocked(ctrl_info); in pqi_submit_raid_request_synchronous()
4364 if (pqi_ctrl_offline(ctrl_info)) { in pqi_submit_raid_request_synchronous()
4369 io_request = pqi_alloc_io_request(ctrl_info); in pqi_submit_raid_request_synchronous()
4385 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4388 pqi_wait_for_completion_io(ctrl_info, &wait); in pqi_submit_raid_request_synchronous()
4402 pqi_ctrl_unbusy(ctrl_info); in pqi_submit_raid_request_synchronous()
4403 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4428 struct pqi_ctrl_info *ctrl_info, in pqi_submit_admin_request_synchronous() argument
4434 pqi_submit_admin_request(ctrl_info, request); in pqi_submit_admin_request_synchronous()
4436 rc = pqi_poll_for_admin_response(ctrl_info, response); in pqi_submit_admin_request_synchronous()
4444 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_report_device_capability() argument
4466 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4473 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); in pqi_report_device_capability()
4475 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4487 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4489 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4491 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4494 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4496 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4498 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4505 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4508 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4510 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4519 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) in pqi_validate_device_capability() argument
4521 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4523 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4525 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4530 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4532 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4534 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4539 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4541 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4543 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4548 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4549 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4554 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4555 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4563 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) in pqi_create_event_queue() argument
4570 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4595 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_event_queue()
4600 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4608 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, in pqi_create_queue_group() argument
4616 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4634 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4640 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4643 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4648 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4669 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4675 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4678 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4683 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4703 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4706 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4725 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
4733 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, in pqi_create_queue_group()
4736 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4741 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
4749 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) in pqi_create_queues() argument
4754 rc = pqi_create_event_queue(ctrl_info); in pqi_create_queues()
4756 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4761 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
4762 rc = pqi_create_queue_group(ctrl_info, i); in pqi_create_queues()
4764 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
4766 i, ctrl_info->num_queue_groups); in pqi_create_queues()
4777 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, in pqi_configure_events() argument
4800 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4807 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
4809 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4820 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
4835 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
4842 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_configure_events()
4844 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
4854 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) in pqi_enable_events() argument
4856 return pqi_configure_events(ctrl_info, true); in pqi_enable_events()
4859 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) in pqi_free_all_io_requests() argument
4866 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
4869 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
4870 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
4871 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
4873 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
4883 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
4884 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
4887 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_error_buffer() argument
4889 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
4890 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
4891 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
4893 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
4899 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_alloc_io_resources() argument
4908 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
4909 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
4911 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
4912 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4917 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
4918 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
4919 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
4921 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
4922 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
4925 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4935 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
4949 pqi_free_all_io_requests(ctrl_info); in pqi_alloc_io_resources()
4959 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_io_resources() argument
4964 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
4965 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
4966 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
4968 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
4969 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
4972 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4975 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
4983 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
4987 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
4990 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
4991 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
4994 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) in pqi_calculate_queue_resources() argument
5006 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
5007 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
5011 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
5015 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
5016 ctrl_info->max_hw_queue_index = num_queue_groups - 1; in pqi_calculate_queue_resources()
5022 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
5023 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
5028 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
5035 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
5039 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
5041 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
5042 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
5044 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
5045 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5050 ctrl_info->max_sg_per_r56_iu = in pqi_calculate_queue_resources()
5051 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
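pqi_calculate_io_resources() and pqi_calculate_queue_resources() above size everything from limits the firmware reports: reserved slots come off max_outstanding_requests, and the queue-group count is clamped by CPU count, available MSI-X vectors, and the inbound/outbound queue limits (two inbound queues per group for the RAID and AIO paths, one outbound queue held back for events). A rough sketch of that clamping arithmetic; the parameter names are placeholders, not the driver's exact formula:

#include <linux/minmax.h>

/* Clamp the number of queue groups against CPU, MSI-X and firmware queue limits. */
static unsigned int clamp_queue_groups(unsigned int num_cpus,
	unsigned int max_msix_vectors, unsigned int max_inbound_queues,
	unsigned int max_outbound_queues)
{
	unsigned int max_groups;

	/* Two inbound queues per group; one outbound queue reserved for events. */
	max_groups = min(max_inbound_queues / 2, max_outbound_queues - 1);

	return min3(num_cpus, max_msix_vectors, max_groups);
}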
5106 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_raid_sg_list() argument
5131 ctrl_info->max_sg_per_iu, &chained); in pqi_build_raid_sg_list()
5142 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r1_sg_list() argument
5168 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_r1_sg_list()
5180 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_r56_sg_list() argument
5204 ctrl_info->max_sg_per_r56_iu, &chained); in pqi_build_aio_r56_sg_list()
5216 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, in pqi_build_aio_sg_list() argument
5242 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_sg_list()
5266 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, in pqi_raid_submit_scsi_cmd_with_io_request() argument
5326 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_scsi_cmd_with_io_request()
5332 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5338 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5343 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_raid_submit_scsi_cmd() argument
5349 io_request = pqi_alloc_io_request(ctrl_info); in pqi_raid_submit_scsi_cmd()
5351 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, in pqi_raid_submit_scsi_cmd()
5359 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_retry_needed() local
5374 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5375 if (pqi_ctrl_offline(ctrl_info)) in pqi_raid_bypass_retry_needed()
5397 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_scsi_cmd() argument
5401 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5405 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_io() argument
5414 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_io()
5447 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5463 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); in pqi_aio_submit_io()
5469 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_io()
5474 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r1_write_io() argument
5483 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_r1_write_io()
5521 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); in pqi_aio_submit_r1_write_io()
5527 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r1_write_io()
5532 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, in pqi_aio_submit_r56_write_io() argument
5541 io_request = pqi_alloc_io_request(ctrl_info); in pqi_aio_submit_r56_write_io()
5586 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); in pqi_aio_submit_r56_write_io()
5592 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_r56_write_io()
5597 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, in pqi_get_hw_queue() argument
5603 if (hw_queue > ctrl_info->max_hw_queue_index) in pqi_get_hw_queue()
5640 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, in pqi_is_parity_write_stream() argument
5651 if (!ctrl_info->enable_stream_detection) in pqi_is_parity_write_stream()
5672 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || in pqi_is_parity_write_stream()
5673 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) in pqi_is_parity_write_stream()
5718 struct pqi_ctrl_info *ctrl_info; in pqi_scsi_queue_command() local
5734 ctrl_info = shost_to_hba(shost); in pqi_scsi_queue_command()
5736 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { in pqi_scsi_queue_command()
5742 if (pqi_ctrl_blocked(ctrl_info)) { in pqi_scsi_queue_command()
5753 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); in pqi_scsi_queue_command()
5754 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
5760 !pqi_is_parity_write_stream(ctrl_info, scmd)) { in pqi_scsi_queue_command()
5761 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5768 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5771 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
5773 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); in pqi_scsi_queue_command()
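The pqi_scsi_queue_command() references above trace the dispatch shape of the queuecommand entry point: fail the command immediately if the controller is offline or the device is being removed, ask the midlayer to retry while the controller is blocked, pick a hardware queue, try the accelerated bypass path first, and fall back to the plain RAID path when bypass is not taken. A condensed sketch of that control flow; every type and helper below (my_ctrl, my_dev, fail_no_connect(), try_bypass_submit(), raid_submit(), ...) is a hypothetical stand-in, not the driver's API:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_ctrl;
struct my_dev;

static bool ctrl_offline(struct my_ctrl *ctrl);
static bool ctrl_blocked(struct my_ctrl *ctrl);
static bool device_in_remove(struct my_dev *dev);
static bool bypass_eligible(struct my_dev *dev, struct scsi_cmnd *scmd);
static u16 pick_hw_queue(struct my_ctrl *ctrl, struct scsi_cmnd *scmd);
static void fail_no_connect(struct scsi_cmnd *scmd);
static int try_bypass_submit(struct my_ctrl *ctrl, struct my_dev *dev,
	struct scsi_cmnd *scmd, u16 hw_queue);
static int raid_submit(struct my_ctrl *ctrl, struct my_dev *dev,
	struct scsi_cmnd *scmd, u16 hw_queue);

static int my_queue_command(struct my_ctrl *ctrl, struct my_dev *dev,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;
	int rc;

	if (ctrl_offline(ctrl) || device_in_remove(dev)) {
		fail_no_connect(scmd);		/* complete with DID_NO_CONNECT, no retry */
		return 0;
	}

	if (ctrl_blocked(ctrl))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer requeues the command later */

	hw_queue = pick_hw_queue(ctrl, scmd);

	rc = SCSI_MLQUEUE_HOST_BUSY;
	if (bypass_eligible(dev, scmd))
		rc = try_bypass_submit(ctrl, dev, scmd, hw_queue);
	if (rc != 0)				/* bypass declined or busy: use the RAID path */
		rc = raid_submit(ctrl, dev, scmd, hw_queue);

	return rc;
}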
5783 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, in pqi_wait_until_queued_io_drained() argument
5800 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_queued_io_drained()
5801 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_queued_io_drained()
5810 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) in pqi_wait_until_inbound_queues_empty() argument
5819 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_wait_until_inbound_queues_empty()
5820 queue_group = &ctrl_info->queue_groups[i]; in pqi_wait_until_inbound_queues_empty()
5822 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); in pqi_wait_until_inbound_queues_empty()
5833 pqi_check_ctrl_health(ctrl_info); in pqi_wait_until_inbound_queues_empty()
5834 if (pqi_ctrl_offline(ctrl_info)) in pqi_wait_until_inbound_queues_empty()
5844 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, in pqi_fail_io_queued_for_device() argument
5856 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
5857 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
5890 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, in pqi_device_wait_for_pending_io() argument
5902 pqi_check_ctrl_health(ctrl_info); in pqi_device_wait_for_pending_io()
5903 if (pqi_ctrl_offline(ctrl_info)) in pqi_device_wait_for_pending_io()
5907 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
5909 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
5914 dev_warn(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
5916 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
5936 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, in pqi_wait_for_lun_reset_completion() argument
5951 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_lun_reset_completion()
5952 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_lun_reset_completion()
5959 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_for_lun_reset_completion()
5961 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, in pqi_wait_for_lun_reset_completion()
5970 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_lun_reset() argument
5977 io_request = pqi_alloc_io_request(ctrl_info); in pqi_lun_reset()
5991 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
5994 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
5997 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); in pqi_lun_reset()
6011 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) in pqi_lun_reset_with_retries() argument
6019 reset_rc = pqi_lun_reset(ctrl_info, device); in pqi_lun_reset_with_retries()
6028 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs); in pqi_lun_reset_with_retries()
6035 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, in pqi_device_reset() argument
6040 pqi_ctrl_block_requests(ctrl_info); in pqi_device_reset()
6041 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_device_reset()
6042 pqi_fail_io_queued_for_device(ctrl_info, device); in pqi_device_reset()
6043 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); in pqi_device_reset()
6047 rc = pqi_lun_reset_with_retries(ctrl_info, device); in pqi_device_reset()
6048 pqi_ctrl_unblock_requests(ctrl_info); in pqi_device_reset()
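pqi_device_reset() above brackets the LUN reset in a fixed sequence: block new requests, wait for the controller to quiesce, fail I/O already queued for the device, wait for the inbound queues to drain, issue the LUN reset with retries, and finally unblock requests regardless of the outcome. A minimal sketch of that ordering, with every helper a hypothetical stand-in for the corresponding pqi_* routine listed above:

struct my_ctrl;
struct my_dev;

static void block_requests(struct my_ctrl *ctrl);
static void wait_until_quiesced(struct my_ctrl *ctrl);
static void fail_queued_io(struct my_ctrl *ctrl, struct my_dev *dev);
static int wait_inbound_queues_empty(struct my_ctrl *ctrl);
static int lun_reset_with_retries(struct my_ctrl *ctrl, struct my_dev *dev);
static void unblock_requests(struct my_ctrl *ctrl);

static int device_reset(struct my_ctrl *ctrl, struct my_dev *dev)
{
	int rc;

	block_requests(ctrl);			/* stop new I/O from entering */
	wait_until_quiesced(ctrl);		/* let in-flight submissions settle */
	fail_queued_io(ctrl, dev);		/* bounce I/O still queued for this LUN */

	rc = wait_inbound_queues_empty(ctrl);
	if (rc == 0)
		rc = lun_reset_with_retries(ctrl, dev);

	unblock_requests(ctrl);			/* always resume I/O, success or failure */
	return rc;
}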
6057 struct pqi_ctrl_info *ctrl_info; in pqi_eh_device_reset_handler() local
6061 ctrl_info = shost_to_hba(shost); in pqi_eh_device_reset_handler()
6064 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_eh_device_reset_handler()
6066 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
6072 pqi_check_ctrl_health(ctrl_info); in pqi_eh_device_reset_handler()
6073 if (pqi_ctrl_offline(ctrl_info)) in pqi_eh_device_reset_handler()
6076 rc = pqi_device_reset(ctrl_info, device); in pqi_eh_device_reset_handler()
6078 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_device_reset_handler()
6083 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_eh_device_reset_handler()
6092 struct pqi_ctrl_info *ctrl_info; in pqi_slave_alloc() local
6096 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
6098 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6103 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); in pqi_slave_alloc()
6110 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), in pqi_slave_alloc()
6131 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6138 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_map_queues() local
6141 ctrl_info->pci_dev, 0); in pqi_map_queues()
6158 struct pqi_ctrl_info *ctrl_info; in pqi_slave_destroy() local
6160 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
6162 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6171 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6174 pqi_dev_info(ctrl_info, "removed", device); in pqi_slave_destroy()
6179 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_getpciinfo_ioctl() argument
6189 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
6289 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) in pqi_passthru_ioctl() argument
6300 if (pqi_ctrl_offline(ctrl_info)) in pqi_passthru_ioctl()
6302 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) in pqi_passthru_ioctl()
6373 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6384 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6387 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, in pqi_passthru_ioctl()
6391 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6436 struct pqi_ctrl_info *ctrl_info; in pqi_ioctl() local
6438 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6444 rc = pqi_scan_scsi_devices(ctrl_info); in pqi_ioctl()
6447 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); in pqi_ioctl()
6453 rc = pqi_passthru_ioctl(ctrl_info, arg); in pqi_ioctl()
6467 struct pqi_ctrl_info *ctrl_info; in pqi_firmware_version_show() local
6470 ctrl_info = shost_to_hba(shost); in pqi_firmware_version_show()
6472 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6485 struct pqi_ctrl_info *ctrl_info; in pqi_serial_number_show() local
6488 ctrl_info = shost_to_hba(shost); in pqi_serial_number_show()
6490 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6497 struct pqi_ctrl_info *ctrl_info; in pqi_model_show() local
6500 ctrl_info = shost_to_hba(shost); in pqi_model_show()
6502 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6509 struct pqi_ctrl_info *ctrl_info; in pqi_vendor_show() local
6512 ctrl_info = shost_to_hba(shost); in pqi_vendor_show()
6514 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
6571 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_show() local
6574 ctrl_info->enable_stream_detection); in pqi_host_enable_stream_detection_show()
6581 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_stream_detection_store() local
6590 ctrl_info->enable_stream_detection = set_stream_detection; in pqi_host_enable_stream_detection_store()
6599 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_show() local
6601 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); in pqi_host_enable_r5_writes_show()
6608 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r5_writes_store() local
6617 ctrl_info->enable_r5_writes = set_r5_writes; in pqi_host_enable_r5_writes_store()
6626 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_show() local
6628 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); in pqi_host_enable_r6_writes_show()
6635 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); in pqi_host_enable_r6_writes_store() local
6644 ctrl_info->enable_r6_writes = set_r6_writes; in pqi_host_enable_r6_writes_store()
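The enable_stream_detection/enable_r5_writes/enable_r6_writes attributes above are the usual Scsi_Host sysfs pattern: show() formats the current flag with scnprintf(), store() parses and validates the user's value before writing it back. A generic sketch of that pattern; it keeps its private data behind shost_priv() for brevity, whereas the listing shows this driver stashes its ctrl_info pointer in shost->hostdata[0]:

#include <linux/device.h>
#include <scsi/scsi_host.h>

struct my_hba {				/* hypothetical private data layout */
	u8 enable_stream_detection;
};

static ssize_t stream_detection_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct my_hba *hba = shost_priv(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n", hba->enable_stream_detection);
}

static ssize_t stream_detection_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct my_hba *hba = shost_priv(shost);
	u8 val;

	if (kstrtou8(buf, 0, &val) || val > 1)
		return -EINVAL;		/* accept only 0 or 1 */

	hba->enable_stream_detection = val;
	return count;
}
static DEVICE_ATTR_RW(stream_detection);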
6682 struct pqi_ctrl_info *ctrl_info; in pqi_unique_id_show() local
6689 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
6691 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6695 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6706 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
6720 struct pqi_ctrl_info *ctrl_info; in pqi_lunid_show() local
6727 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
6729 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6733 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6739 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
6749 struct pqi_ctrl_info *ctrl_info; in pqi_path_info_show() local
6762 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
6764 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6768 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6785 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
6824 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
6832 struct pqi_ctrl_info *ctrl_info; in pqi_sas_address_show() local
6839 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
6841 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6845 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6851 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
6859 struct pqi_ctrl_info *ctrl_info; in pqi_ssd_smart_path_enabled_show() local
6865 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
6867 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6871 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6879 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
6887 struct pqi_ctrl_info *ctrl_info; in pqi_raid_level_show() local
6894 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
6896 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6900 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6909 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
6917 struct pqi_ctrl_info *ctrl_info; in pqi_raid_bypass_cnt_show() local
6924 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
6926 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6930 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6936 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
6978 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_register_scsi() argument
6983 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); in pqi_register_scsi()
6985 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); in pqi_register_scsi()
6996 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
6997 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
6999 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
7001 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
7003 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
7005 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
7007 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
7009 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); in pqi_register_scsi()
7013 rc = pqi_add_sas_host(shost, ctrl_info); in pqi_register_scsi()
7015 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); in pqi_register_scsi()
7019 ctrl_info->scsi_host = shost; in pqi_register_scsi()
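pqi_register_scsi() above is the standard host registration flow that the listing makes visible end to end: scsi_host_alloc() with a pointer-sized private area, the ctrl_info pointer parked in shost->hostdata[0], scsi_add_host() against the PCI device, and SAS transport setup afterwards, with the host reference dropped on the error path. A stripped-down sketch of the same flow; register_host() is illustrative, not the driver's function:

#include <scsi/scsi_host.h>

static int register_host(struct device *parent,
	struct scsi_host_template *tmpl, void *priv)
{
	struct Scsi_Host *shost;
	int rc;

	shost = scsi_host_alloc(tmpl, sizeof(void *));
	if (!shost)
		return -ENOMEM;

	shost->hostdata[0] = (unsigned long)priv;	/* stash driver-private pointer */

	rc = scsi_add_host(shost, parent);
	if (rc) {
		scsi_host_put(shost);			/* drop the allocation reference */
		return rc;
	}

	return 0;
}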
7031 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) in pqi_unregister_scsi() argument
7035 pqi_delete_sas_host(ctrl_info); in pqi_unregister_scsi()
7037 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
7045 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) in pqi_wait_for_pqi_reset_completion() argument
7053 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
7062 pqi_check_ctrl_health(ctrl_info); in pqi_wait_for_pqi_reset_completion()
7063 if (pqi_ctrl_offline(ctrl_info)) { in pqi_wait_for_pqi_reset_completion()
7076 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) in pqi_reset() argument
7081 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
7082 rc = sis_pqi_reset_quiesce(ctrl_info); in pqi_reset()
7084 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7094 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
7096 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); in pqi_reset()
7098 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7104 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_serial_number() argument
7113 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); in pqi_get_ctrl_serial_number()
7117 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
7119 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
7127 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) in pqi_get_ctrl_product_details() argument
7136 rc = pqi_identify_controller(ctrl_info, identify); in pqi_get_ctrl_product_details()
7142 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7146 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7149 ctrl_info->firmware_version in pqi_get_ctrl_product_details()
7151 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
7152 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
7153 sizeof(ctrl_info->firmware_version) - in pqi_get_ctrl_product_details()
7159 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
7161 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
7163 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
7165 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
7174 struct pqi_ctrl_info *ctrl_info; member
7227 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, in pqi_config_table_update() argument
7244 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_config_table_update()
7247 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, in pqi_enable_firmware_features() argument
7274 return pqi_config_table_update(ctrl_info, in pqi_enable_firmware_features()
7284 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7288 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_status() argument
7292 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
7298 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
7303 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
7307 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, in pqi_ctrl_update_feature_flags() argument
7312 ctrl_info->enable_r1_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7315 ctrl_info->enable_r5_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7318 ctrl_info->enable_r6_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7321 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
7323 pqi_read_soft_reset_status(ctrl_info); in pqi_ctrl_update_feature_flags()
7326 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7329 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7332 ctrl_info->unique_wwid_in_report_phys_lun_supported = in pqi_ctrl_update_feature_flags()
7335 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); in pqi_ctrl_update_feature_flags()
7338 pqi_firmware_feature_status(ctrl_info, firmware_feature); in pqi_ctrl_update_feature_flags()
7341 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, in pqi_firmware_feature_update() argument
7345 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
7437 struct pqi_ctrl_info *ctrl_info; in pqi_process_firmware_features() local
7443 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
7454 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7469 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, in pqi_process_firmware_features()
7472 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
7477 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7491 pqi_firmware_feature_update(ctrl_info, in pqi_process_firmware_features()
7520 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_reset_config() argument
7522 ctrl_info->heartbeat_counter = NULL; in pqi_ctrl_reset_config()
7523 ctrl_info->soft_reset_status = NULL; in pqi_ctrl_reset_config()
7524 ctrl_info->soft_reset_handshake_supported = false; in pqi_ctrl_reset_config()
7525 ctrl_info->enable_r1_writes = false; in pqi_ctrl_reset_config()
7526 ctrl_info->enable_r5_writes = false; in pqi_ctrl_reset_config()
7527 ctrl_info->enable_r6_writes = false; in pqi_ctrl_reset_config()
7528 ctrl_info->raid_iu_timeout_supported = false; in pqi_ctrl_reset_config()
7529 ctrl_info->tmf_iu_timeout_supported = false; in pqi_ctrl_reset_config()
7530 ctrl_info->unique_wwid_in_report_phys_lun_supported = false; in pqi_ctrl_reset_config()
7533 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) in pqi_process_config_table() argument
7544 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
7550 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7559 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; in pqi_process_config_table()
7563 section_info.ctrl_info = ctrl_info; in pqi_process_config_table()
7580 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
7583 ctrl_info->heartbeat_counter = in pqi_process_config_table()
7590 ctrl_info->soft_reset_status = in pqi_process_config_table()
7616 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_revert_to_sis_mode() argument
7620 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); in pqi_revert_to_sis_mode()
7621 rc = pqi_reset(ctrl_info); in pqi_revert_to_sis_mode()
7624 rc = sis_reenable_sis_mode(ctrl_info); in pqi_revert_to_sis_mode()
7626 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
7630 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_revert_to_sis_mode()
7640 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) in pqi_force_sis_mode() argument
7642 if (!sis_is_firmware_running(ctrl_info)) in pqi_force_sis_mode()
7645 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) in pqi_force_sis_mode()
7648 if (sis_is_kernel_up(ctrl_info)) { in pqi_force_sis_mode()
7649 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); in pqi_force_sis_mode()
7653 return pqi_revert_to_sis_mode(ctrl_info); in pqi_force_sis_mode()
7671 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init() argument
7677 if (pqi_is_fw_triage_supported(ctrl_info)) { in pqi_ctrl_init()
7678 rc = sis_wait_for_fw_triage_completion(ctrl_info); in pqi_ctrl_init()
7682 sis_soft_reset(ctrl_info); in pqi_ctrl_init()
7685 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init()
7694 rc = sis_wait_for_ctrl_ready(ctrl_info); in pqi_ctrl_init()
7697 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7709 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init()
7711 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7716 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init()
7718 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7723 product_id = sis_get_product_id(ctrl_info); in pqi_ctrl_init()
7724 ctrl_info->product_id = (u8)product_id; in pqi_ctrl_init()
7725 ctrl_info->product_revision = (u8)(product_id >> 8); in pqi_ctrl_init()
7728 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7730 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7733 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
7735 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
7739 pqi_calculate_io_resources(ctrl_info); in pqi_ctrl_init()
7741 rc = pqi_alloc_error_buffer(ctrl_info); in pqi_ctrl_init()
7743 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7753 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init()
7755 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7761 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init()
7763 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7769 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
7770 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init()
7772 rc = pqi_alloc_admin_queues(ctrl_info); in pqi_ctrl_init()
7774 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7779 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init()
7781 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7786 rc = pqi_report_device_capability(ctrl_info); in pqi_ctrl_init()
7788 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7793 rc = pqi_validate_device_capability(ctrl_info); in pqi_ctrl_init()
7797 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7799 rc = pqi_enable_msix_interrupts(ctrl_info); in pqi_ctrl_init()
7803 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
7804 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
7805 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
7806 pqi_calculate_queue_resources(ctrl_info); in pqi_ctrl_init()
7809 rc = pqi_alloc_io_resources(ctrl_info); in pqi_ctrl_init()
7813 rc = pqi_alloc_operational_queues(ctrl_info); in pqi_ctrl_init()
7815 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7820 pqi_init_operational_queues(ctrl_info); in pqi_ctrl_init()
7822 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init()
7826 rc = pqi_request_irqs(ctrl_info); in pqi_ctrl_init()
7830 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init()
7832 ctrl_info->controller_online = true; in pqi_ctrl_init()
7834 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init()
7838 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init()
7840 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init()
7841 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init()
7843 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7847 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init()
7851 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init()
7853 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7859 rc = pqi_register_scsi(ctrl_info); in pqi_ctrl_init()
7863 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init()
7865 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7870 rc = pqi_get_ctrl_serial_number(ctrl_info); in pqi_ctrl_init()
7872 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7877 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init()
7879 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7884 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init()
7886 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
7891 pqi_schedule_update_time_worker(ctrl_info); in pqi_ctrl_init()
7893 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init()
7898 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) in pqi_reinit_queues() argument
7904 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
7909 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
7910 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
7911 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
7912 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
7914 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
7915 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
7916 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
7919 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
7924 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) in pqi_ctrl_init_resume() argument
7928 rc = pqi_force_sis_mode(ctrl_info); in pqi_ctrl_init_resume()
7936 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); in pqi_ctrl_init_resume()
7944 rc = sis_get_ctrl_properties(ctrl_info); in pqi_ctrl_init_resume()
7946 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7951 rc = sis_get_pqi_capabilities(ctrl_info); in pqi_ctrl_init_resume()
7953 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7963 rc = sis_init_base_struct_addr(ctrl_info); in pqi_ctrl_init_resume()
7965 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7971 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); in pqi_ctrl_init_resume()
7973 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7979 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
7980 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); in pqi_ctrl_init_resume()
7982 pqi_reinit_queues(ctrl_info); in pqi_ctrl_init_resume()
7984 rc = pqi_create_admin_queues(ctrl_info); in pqi_ctrl_init_resume()
7986 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
7991 rc = pqi_create_queues(ctrl_info); in pqi_ctrl_init_resume()
7995 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); in pqi_ctrl_init_resume()
7997 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
7998 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ctrl_init_resume()
8000 pqi_ctrl_reset_config(ctrl_info); in pqi_ctrl_init_resume()
8002 rc = pqi_process_config_table(ctrl_info); in pqi_ctrl_init_resume()
8006 pqi_start_heartbeat_timer(ctrl_info); in pqi_ctrl_init_resume()
8008 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init_resume()
8009 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); in pqi_ctrl_init_resume()
8011 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8015 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init_resume()
8019 rc = pqi_enable_events(ctrl_info); in pqi_ctrl_init_resume()
8021 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8026 rc = pqi_get_ctrl_product_details(ctrl_info); in pqi_ctrl_init_resume()
8028 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8033 rc = pqi_set_diag_rescan(ctrl_info); in pqi_ctrl_init_resume()
8035 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8040 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); in pqi_ctrl_init_resume()
8042 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8047 if (pqi_ofa_in_progress(ctrl_info)) in pqi_ctrl_init_resume()
8048 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ctrl_init_resume()
8050 pqi_scan_scsi_devices(ctrl_info); in pqi_ctrl_init_resume()
8065 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_pci_init() argument
8070 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
8072 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8082 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
8084 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
8088 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
8090 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8095 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
8096 ctrl_info->pci_dev, 0), in pqi_pci_init()
8098 if (!ctrl_info->iomem_base) { in pqi_pci_init()
8099 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8108 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
8111 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8117 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
8119 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
8120 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
8122 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
8127 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
8129 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
8134 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) in pqi_cleanup_pci_init() argument
8136 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
8137 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8138 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
8139 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8140 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
8145 struct pqi_ctrl_info *ctrl_info; in pqi_alloc_ctrl_info() local
8147 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), in pqi_alloc_ctrl_info()
8149 if (!ctrl_info) in pqi_alloc_ctrl_info()
8152 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
8153 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
8154 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
8156 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
8157 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
8159 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
8160 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
8162 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
8163 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
8165 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
8166 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
8168 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); in pqi_alloc_ctrl_info()
8169 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); in pqi_alloc_ctrl_info()
8171 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
8173 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
8175 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
8176 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
8177 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
8179 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; in pqi_alloc_ctrl_info()
8180 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_alloc_ctrl_info()
8182 ctrl_info->max_transfer_encrypted_nvme = in pqi_alloc_ctrl_info()
8184 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; in pqi_alloc_ctrl_info()
8185 ctrl_info->max_write_raid_1_10_2drive = ~0; in pqi_alloc_ctrl_info()
8186 ctrl_info->max_write_raid_1_10_3drive = ~0; in pqi_alloc_ctrl_info()
8188 return ctrl_info; in pqi_alloc_ctrl_info()
8191 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_info() argument
8193 kfree(ctrl_info); in pqi_free_ctrl_info()
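pqi_alloc_ctrl_info()/pqi_free_ctrl_info() above show the per-controller state being allocated with kzalloc_node() on the adapter's NUMA node and then initialized field by field (mutexes, spinlock, list head, work items, timer, semaphore, wait queue). A minimal sketch of that allocate-then-init pattern, using a hypothetical my_ctrl structure with only a few of the fields the listing mentions:

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_ctrl {
	struct mutex scan_mutex;
	spinlock_t device_list_lock;
	struct list_head device_list;
	struct work_struct event_work;
};

static void my_event_worker(struct work_struct *work);

/* Allocate and initialize controller state on the adapter's NUMA node. */
static struct my_ctrl *alloc_ctrl(int numa_node)
{
	struct my_ctrl *ctrl;

	ctrl = kzalloc_node(sizeof(*ctrl), GFP_KERNEL, numa_node);
	if (!ctrl)
		return NULL;

	mutex_init(&ctrl->scan_mutex);
	spin_lock_init(&ctrl->device_list_lock);
	INIT_LIST_HEAD(&ctrl->device_list);
	INIT_WORK(&ctrl->event_work, my_event_worker);

	return ctrl;
}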
8196 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) in pqi_free_interrupts() argument
8198 pqi_free_irqs(ctrl_info); in pqi_free_interrupts()
8199 pqi_disable_msix_interrupts(ctrl_info); in pqi_free_interrupts()
8202 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) in pqi_free_ctrl_resources() argument
8204 pqi_stop_heartbeat_timer(ctrl_info); in pqi_free_ctrl_resources()
8205 pqi_free_interrupts(ctrl_info); in pqi_free_ctrl_resources()
8206 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
8207 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8208 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
8209 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
8210 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8211 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
8212 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8213 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
8214 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
8215 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8216 pqi_free_all_io_requests(ctrl_info); in pqi_free_ctrl_resources()
8217 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
8218 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8219 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
8220 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
8221 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
8222 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
8223 pqi_cleanup_pci_init(ctrl_info); in pqi_free_ctrl_resources()
8224 pqi_free_ctrl_info(ctrl_info); in pqi_free_ctrl_resources()
8227 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) in pqi_remove_ctrl() argument
8229 pqi_cancel_rescan_worker(ctrl_info); in pqi_remove_ctrl()
8230 pqi_cancel_update_time_worker(ctrl_info); in pqi_remove_ctrl()
8231 pqi_unregister_scsi(ctrl_info); in pqi_remove_ctrl()
8232 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
8233 pqi_revert_to_sis_mode(ctrl_info); in pqi_remove_ctrl()
8234 pqi_free_ctrl_resources(ctrl_info); in pqi_remove_ctrl()
8237 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_quiesce() argument
8239 pqi_ctrl_block_scan(ctrl_info); in pqi_ofa_ctrl_quiesce()
8240 pqi_scsi_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8241 pqi_ctrl_block_device_reset(ctrl_info); in pqi_ofa_ctrl_quiesce()
8242 pqi_ctrl_block_requests(ctrl_info); in pqi_ofa_ctrl_quiesce()
8243 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_ofa_ctrl_quiesce()
8244 pqi_stop_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_quiesce()
8247 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_ctrl_unquiesce() argument
8249 pqi_start_heartbeat_timer(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8250 pqi_ctrl_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8251 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8252 pqi_scsi_unblock_requests(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8253 pqi_ctrl_unblock_scan(ctrl_info); in pqi_ofa_ctrl_unquiesce()
8256 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) in pqi_ofa_alloc_mem() argument
8265 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_alloc_mem()
8271 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); in pqi_ofa_alloc_mem()
8272 if (!ctrl_info->pqi_ofa_chunk_virt_addr) in pqi_ofa_alloc_mem()
8275 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_alloc_mem()
8278 ctrl_info->pqi_ofa_chunk_virt_addr[i] = in pqi_ofa_alloc_mem()
8280 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) in pqi_ofa_alloc_mem()
8297 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_alloc_mem()
8300 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_alloc_mem()
8306 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_alloc_host_buffer() argument
8312 if (ctrl_info->ofa_bytes_requested == 0) in pqi_ofa_alloc_host_buffer()
8315 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); in pqi_ofa_alloc_host_buffer()
8320 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) in pqi_ofa_alloc_host_buffer()
8329 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_setup_host_buffer() argument
8334 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_setup_host_buffer()
8337 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); in pqi_ofa_setup_host_buffer()
8341 ctrl_info->pqi_ofa_mem_virt_addr = ofap; in pqi_ofa_setup_host_buffer()
8343 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { in pqi_ofa_setup_host_buffer()
8346 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_setup_host_buffer()
8347 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_setup_host_buffer()
8355 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_free_host_buffer() argument
8363 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_free_host_buffer()
8367 dev = &ctrl_info->pci_dev->dev; in pqi_ofa_free_host_buffer()
8379 ctrl_info->pqi_ofa_chunk_virt_addr[i], in pqi_ofa_free_host_buffer()
8382 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); in pqi_ofa_free_host_buffer()
8386 ctrl_info->pqi_ofa_mem_dma_handle); in pqi_ofa_free_host_buffer()
8387 ctrl_info->pqi_ofa_mem_virt_addr = NULL; in pqi_ofa_free_host_buffer()
8390 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) in pqi_ofa_host_memory_update() argument
8404 ofap = ctrl_info->pqi_ofa_mem_virt_addr; in pqi_ofa_host_memory_update()
8411 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, in pqi_ofa_host_memory_update()
8417 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); in pqi_ofa_host_memory_update()
8420 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) in pqi_ofa_ctrl_restart() argument
8424 return pqi_ctrl_init_resume(ctrl_info); in pqi_ofa_ctrl_restart()
8432 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) in pqi_fail_all_outstanding_requests() argument
8438 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
8439 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
8457 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline_deferred() argument
8460 pqi_stop_heartbeat_timer(ctrl_info); in pqi_take_ctrl_offline_deferred()
8461 pqi_free_interrupts(ctrl_info); in pqi_take_ctrl_offline_deferred()
8462 pqi_cancel_rescan_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
8463 pqi_cancel_update_time_worker(ctrl_info); in pqi_take_ctrl_offline_deferred()
8464 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_take_ctrl_offline_deferred()
8465 pqi_fail_all_outstanding_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
8466 pqi_ctrl_unblock_requests(ctrl_info); in pqi_take_ctrl_offline_deferred()
8471 struct pqi_ctrl_info *ctrl_info; in pqi_ctrl_offline_worker() local
8473 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); in pqi_ctrl_offline_worker()
8474 pqi_take_ctrl_offline_deferred(ctrl_info); in pqi_ctrl_offline_worker()
8477 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) in pqi_take_ctrl_offline() argument
8479 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
8482 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
8483 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
8484 pqi_ctrl_block_requests(ctrl_info); in pqi_take_ctrl_offline()
8486 sis_shutdown_ctrl(ctrl_info); in pqi_take_ctrl_offline()
8487 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
8488 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); in pqi_take_ctrl_offline()
8489 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
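pqi_take_ctrl_offline() above keeps the offline trigger lightweight: it flips controller_online, blocks requests, shuts the controller down, and hands the expensive teardown (stopping timers, freeing interrupts, failing outstanding requests) to ctrl_offline_work via schedule_work(), with pqi_ctrl_offline_worker() recovering the controller pointer through container_of(). A minimal sketch of that split, assuming a hypothetical ctrl_state structure:

#include <linux/workqueue.h>

struct ctrl_state {
	bool online;
	struct work_struct offline_work;
};

static void offline_worker(struct work_struct *work)
{
	struct ctrl_state *ctrl = container_of(work, struct ctrl_state, offline_work);

	/* Heavyweight cleanup (timers, IRQs, outstanding I/O) runs here in process context. */
	(void)ctrl;
}

static void take_ctrl_offline(struct ctrl_state *ctrl)
{
	if (!ctrl->online)
		return;				/* already offline: nothing to do */

	ctrl->online = false;
	schedule_work(&ctrl->offline_work);	/* defer the expensive teardown */
}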
8510 struct pqi_ctrl_info *ctrl_info; in pqi_pci_probe() local
8534 ctrl_info = pqi_alloc_ctrl_info(node); in pqi_pci_probe()
8535 if (!ctrl_info) { in pqi_pci_probe()
8541 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
8543 rc = pqi_pci_init(ctrl_info); in pqi_pci_probe()
8547 rc = pqi_ctrl_init(ctrl_info); in pqi_pci_probe()
8554 pqi_remove_ctrl(ctrl_info); in pqi_pci_probe()
8561 struct pqi_ctrl_info *ctrl_info; in pqi_pci_remove() local
8563 ctrl_info = pci_get_drvdata(pci_dev); in pqi_pci_remove()
8564 if (!ctrl_info) in pqi_pci_remove()
8567 pqi_remove_ctrl(ctrl_info); in pqi_pci_remove()
8570 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) in pqi_crash_if_pending_command() argument
8576 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
8577 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
8589 struct pqi_ctrl_info *ctrl_info; in pqi_shutdown() local
8591 ctrl_info = pci_get_drvdata(pci_dev); in pqi_shutdown()
8592 if (!ctrl_info) { in pqi_shutdown()
8598 pqi_wait_until_ofa_finished(ctrl_info); in pqi_shutdown()
8600 pqi_scsi_block_requests(ctrl_info); in pqi_shutdown()
8601 pqi_ctrl_block_device_reset(ctrl_info); in pqi_shutdown()
8602 pqi_ctrl_block_requests(ctrl_info); in pqi_shutdown()
8603 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_shutdown()
8609 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); in pqi_shutdown()
8614 pqi_crash_if_pending_command(ctrl_info); in pqi_shutdown()
8615 pqi_reset(ctrl_info); in pqi_shutdown()
8644 struct pqi_ctrl_info *ctrl_info; in pqi_suspend() local
8646 ctrl_info = pci_get_drvdata(pci_dev); in pqi_suspend()
8648 pqi_wait_until_ofa_finished(ctrl_info); in pqi_suspend()
8650 pqi_ctrl_block_scan(ctrl_info); in pqi_suspend()
8651 pqi_scsi_block_requests(ctrl_info); in pqi_suspend()
8652 pqi_ctrl_block_device_reset(ctrl_info); in pqi_suspend()
8653 pqi_ctrl_block_requests(ctrl_info); in pqi_suspend()
8654 pqi_ctrl_wait_until_quiesced(ctrl_info); in pqi_suspend()
8655 pqi_flush_cache(ctrl_info, SUSPEND); in pqi_suspend()
8656 pqi_stop_heartbeat_timer(ctrl_info); in pqi_suspend()
8658 pqi_crash_if_pending_command(ctrl_info); in pqi_suspend()
8666 ctrl_info->controller_online = false; in pqi_suspend()
8667 ctrl_info->pqi_mode_enabled = false; in pqi_suspend()
8675 struct pqi_ctrl_info *ctrl_info; in pqi_resume() local
8677 ctrl_info = pci_get_drvdata(pci_dev); in pqi_resume()
8680 ctrl_info->max_hw_queue_index = 0; in pqi_resume()
8681 pqi_free_interrupts(ctrl_info); in pqi_resume()
8682 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); in pqi_resume()
8685 &ctrl_info->queue_groups[0]); in pqi_resume()
8687 dev_err(&ctrl_info->pci_dev->dev, in pqi_resume()
8692 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_resume()
8693 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume()
8694 pqi_scsi_unblock_requests(ctrl_info); in pqi_resume()
8695 pqi_ctrl_unblock_scan(ctrl_info); in pqi_resume()
8702 pqi_ctrl_unblock_device_reset(ctrl_info); in pqi_resume()
8703 pqi_ctrl_unblock_requests(ctrl_info); in pqi_resume()
8704 pqi_scsi_unblock_requests(ctrl_info); in pqi_resume()
8705 pqi_ctrl_unblock_scan(ctrl_info); in pqi_resume()
8707 return pqi_ctrl_init_resume(ctrl_info); in pqi_resume()