Lines Matching refs:hba

114 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,  in ufshcd_dump_regs()  argument
132 regs[pos / 4] = ufshcd_readl(hba, offset + pos); in ufshcd_dump_regs()
237 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
239 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
241 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
242 static void ufshcd_hba_exit(struct ufs_hba *hba);
243 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
244 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
245 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
246 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
247 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
248 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
249 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
250 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
252 static int ufshcd_change_power_mode(struct ufs_hba *hba,
254 static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
255 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
256 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
257 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
259 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
260 static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
261 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
262 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
263 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
265 static inline int ufshcd_use_mcq_hooks(struct ufs_hba *hba) in ufshcd_use_mcq_hooks() argument
269 trace_android_vh_ufs_use_mcq_hooks(hba, &mcq_hooks); in ufshcd_use_mcq_hooks()
274 static inline void ufshcd_enable_irq(struct ufs_hba *hba) in ufshcd_enable_irq() argument
276 if (!hba->is_irq_enabled) { in ufshcd_enable_irq()
277 enable_irq(hba->irq); in ufshcd_enable_irq()
278 hba->is_irq_enabled = true; in ufshcd_enable_irq()
282 static inline void ufshcd_disable_irq(struct ufs_hba *hba) in ufshcd_disable_irq() argument
284 if (hba->is_irq_enabled) { in ufshcd_disable_irq()
285 disable_irq(hba->irq); in ufshcd_disable_irq()
286 hba->is_irq_enabled = false; in ufshcd_disable_irq()
290 static inline void ufshcd_wb_config(struct ufs_hba *hba) in ufshcd_wb_config() argument
292 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_config()
295 ufshcd_wb_toggle(hba, true); in ufshcd_wb_config()
297 ufshcd_wb_toggle_flush_during_h8(hba, true); in ufshcd_wb_config()
298 if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)) in ufshcd_wb_config()
299 ufshcd_wb_toggle_flush(hba, true); in ufshcd_wb_config()
302 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba) in ufshcd_scsi_unblock_requests() argument
304 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt)) in ufshcd_scsi_unblock_requests()
305 scsi_unblock_requests(hba->host); in ufshcd_scsi_unblock_requests()
308 static void ufshcd_scsi_block_requests(struct ufs_hba *hba) in ufshcd_scsi_block_requests() argument
310 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1) in ufshcd_scsi_block_requests()
311 scsi_block_requests(hba->host); in ufshcd_scsi_block_requests()
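(The two helpers above, at source lines 302-311, implement reference-counted request blocking: only the 0-to-1 transition actually blocks the SCSI host and only the 1-to-0 transition unblocks it. The following is a minimal userspace C sketch of that counting pattern only; block_requests(), unblock_requests() and block_cnt are illustrative stand-ins, not the driver's ufshcd_scsi_* helpers.)

	/* Hypothetical sketch: only the first blocker and the last
	 * unblocker touch the underlying resource. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int block_cnt;

	static void block_requests(void)
	{
		if (atomic_fetch_add(&block_cnt, 1) == 0)	/* first blocker */
			printf("blocking request queue\n");
	}

	static void unblock_requests(void)
	{
		if (atomic_fetch_sub(&block_cnt, 1) == 1)	/* last unblocker */
			printf("unblocking request queue\n");
	}

	int main(void)
	{
		block_requests();	/* blocks */
		block_requests();	/* nested, no-op */
		unblock_requests();	/* still blocked */
		unblock_requests();	/* unblocks */
		return 0;
	}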
314 static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_cmd_upiu_trace() argument
317 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr; in ufshcd_add_cmd_upiu_trace()
326 header = &hba->lrb[tag].ucd_rsp_ptr->header; in ufshcd_add_cmd_upiu_trace()
328 trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb, in ufshcd_add_cmd_upiu_trace()
332 static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, in ufshcd_add_query_upiu_trace() argument
339 trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header, in ufshcd_add_query_upiu_trace()
343 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_tm_upiu_trace() argument
346 struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; in ufshcd_add_tm_upiu_trace()
348 trace_android_vh_ufs_send_tm_command(hba, tag, (int)str_t); in ufshcd_add_tm_upiu_trace()
354 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
359 trace_ufshcd_upiu(dev_name(hba->dev), str_t, in ufshcd_add_tm_upiu_trace()
365 static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, in ufshcd_add_uic_command_trace() argument
371 trace_android_vh_ufs_send_uic_command(hba, ucmd, (int)str_t); in ufshcd_add_uic_command_trace()
379 cmd = ufshcd_readl(hba, REG_UIC_COMMAND); in ufshcd_add_uic_command_trace()
381 trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd, in ufshcd_add_uic_command_trace()
382 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), in ufshcd_add_uic_command_trace()
383 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), in ufshcd_add_uic_command_trace()
384 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); in ufshcd_add_uic_command_trace()
387 void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, in ufshcd_add_command_trace() argument
393 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_add_command_trace()
402 ufshcd_add_cmd_upiu_trace(hba, tag, str_t); in ufshcd_add_command_trace()
425 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_add_command_trace()
426 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_add_command_trace()
427 trace_ufshcd_command(dev_name(hba->dev), str_t, tag, in ufshcd_add_command_trace()
432 static void ufshcd_print_clk_freqs(struct ufs_hba *hba) in ufshcd_print_clk_freqs() argument
435 struct list_head *head = &hba->clk_list_head; in ufshcd_print_clk_freqs()
443 dev_err(hba->dev, "clk: %s, rate: %u\n", in ufshcd_print_clk_freqs()
448 static void ufshcd_print_evt(struct ufs_hba *hba, u32 id, in ufshcd_print_evt() argument
458 e = &hba->ufs_stats.event[id]; in ufshcd_print_evt()
465 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, in ufshcd_print_evt()
471 dev_err(hba->dev, "No record of %s\n", err_name); in ufshcd_print_evt()
473 dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt); in ufshcd_print_evt()
476 static void ufshcd_print_evt_hist(struct ufs_hba *hba) in ufshcd_print_evt_hist() argument
478 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_print_evt_hist()
480 ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err"); in ufshcd_print_evt_hist()
481 ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err"); in ufshcd_print_evt_hist()
482 ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err"); in ufshcd_print_evt_hist()
483 ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err"); in ufshcd_print_evt_hist()
484 ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err"); in ufshcd_print_evt_hist()
485 ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_print_evt_hist()
487 ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err"); in ufshcd_print_evt_hist()
488 ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL, in ufshcd_print_evt_hist()
490 ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail"); in ufshcd_print_evt_hist()
491 ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR, in ufshcd_print_evt_hist()
493 ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset"); in ufshcd_print_evt_hist()
494 ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset"); in ufshcd_print_evt_hist()
495 ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort"); in ufshcd_print_evt_hist()
497 ufshcd_vops_dbg_register_dump(hba); in ufshcd_print_evt_hist()
501 void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) in ufshcd_print_trs() argument
507 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_print_trs()
508 lrbp = &hba->lrb[tag]; in ufshcd_print_trs()
510 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", in ufshcd_print_trs()
512 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", in ufshcd_print_trs()
514 dev_err(hba->dev, in ufshcd_print_trs()
520 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
524 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag, in ufshcd_print_trs()
531 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_print_trs()
532 prdt_length /= hba->sg_entry_size; in ufshcd_print_trs()
534 dev_err(hba->dev, in ufshcd_print_trs()
541 hba->sg_entry_size * prdt_length); in ufshcd_print_trs()
545 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_print_tmrs() argument
549 for_each_set_bit(tag, &bitmap, hba->nutmrs) { in ufshcd_print_tmrs()
550 struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag]; in ufshcd_print_tmrs()
552 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag); in ufshcd_print_tmrs()
557 static void ufshcd_print_host_state(struct ufs_hba *hba) in ufshcd_print_host_state() argument
559 struct scsi_device *sdev_ufs = hba->sdev_ufs_device; in ufshcd_print_host_state()
561 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); in ufshcd_print_host_state()
562 dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", in ufshcd_print_host_state()
563 hba->outstanding_reqs, hba->outstanding_tasks); in ufshcd_print_host_state()
564 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", in ufshcd_print_host_state()
565 hba->saved_err, hba->saved_uic_err); in ufshcd_print_host_state()
566 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", in ufshcd_print_host_state()
567 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_print_host_state()
568 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n", in ufshcd_print_host_state()
569 hba->pm_op_in_progress, hba->is_sys_suspended); in ufshcd_print_host_state()
570 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n", in ufshcd_print_host_state()
571 hba->auto_bkops_enabled, hba->host->host_self_blocked); in ufshcd_print_host_state()
572 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state); in ufshcd_print_host_state()
573 dev_err(hba->dev, in ufshcd_print_host_state()
575 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), in ufshcd_print_host_state()
576 hba->ufs_stats.hibern8_exit_cnt); in ufshcd_print_host_state()
577 dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n", in ufshcd_print_host_state()
578 ktime_to_us(hba->ufs_stats.last_intr_ts), in ufshcd_print_host_state()
579 hba->ufs_stats.last_intr_status); in ufshcd_print_host_state()
580 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n", in ufshcd_print_host_state()
581 hba->eh_flags, hba->req_abort_count); in ufshcd_print_host_state()
582 dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n", in ufshcd_print_host_state()
583 hba->ufs_version, hba->capabilities, hba->caps); in ufshcd_print_host_state()
584 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, in ufshcd_print_host_state()
585 hba->dev_quirks); in ufshcd_print_host_state()
587 dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n", in ufshcd_print_host_state()
590 ufshcd_print_clk_freqs(hba); in ufshcd_print_host_state()
598 static void ufshcd_print_pwr_info(struct ufs_hba *hba) in ufshcd_print_pwr_info() argument
615 dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n", in ufshcd_print_pwr_info()
617 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx, in ufshcd_print_pwr_info()
618 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx, in ufshcd_print_pwr_info()
619 names[hba->pwr_info.pwr_rx], in ufshcd_print_pwr_info()
620 names[hba->pwr_info.pwr_tx], in ufshcd_print_pwr_info()
621 hba->pwr_info.hs_rate); in ufshcd_print_pwr_info()
624 static void ufshcd_device_reset(struct ufs_hba *hba) in ufshcd_device_reset() argument
628 err = ufshcd_vops_device_reset(hba); in ufshcd_device_reset()
631 ufshcd_set_ufs_dev_active(hba); in ufshcd_device_reset()
632 if (ufshcd_is_wb_allowed(hba)) { in ufshcd_device_reset()
633 hba->dev_info.wb_enabled = false; in ufshcd_device_reset()
634 hba->dev_info.wb_buf_flush_enabled = false; in ufshcd_device_reset()
638 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err); in ufshcd_device_reset()
665 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, in ufshcd_wait_for_register() argument
675 while ((ufshcd_readl(hba, reg) & mask) != val) { in ufshcd_wait_for_register()
678 if ((ufshcd_readl(hba, reg) & mask) != val) in ufshcd_wait_for_register()
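(The matches at source lines 665-678 are the register-polling helper: it spins until (reg & mask) == val and, once the deadline passes, re-reads the register one last time so a late match is not misreported as a timeout. Below is a simplified userspace C sketch of that pattern under assumed names; read_reg() and the millisecond timing are stand-ins for the driver's ufshcd_readl() and interval/timeout parameters.)

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint32_t read_reg(void)
	{
		return 0x1;	/* dummy value; a real driver does an MMIO read here */
	}

	static int wait_for_register(uint32_t mask, uint32_t val, long timeout_ms)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		while ((read_reg() & mask) != val) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
					  (now.tv_nsec - start.tv_nsec) / 1000000;
			if (elapsed_ms > timeout_ms)
				/* final re-read: report success on a late match */
				return (read_reg() & mask) == val ? 0 : -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("wait result: %d\n", wait_for_register(0x1, 0x1, 10));
		return 0;
	}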
693 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) in ufshcd_get_intr_mask() argument
695 if (hba->ufs_version == ufshci_version(1, 0)) in ufshcd_get_intr_mask()
697 if (hba->ufs_version <= ufshci_version(2, 0)) in ufshcd_get_intr_mask()
709 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) in ufshcd_get_ufs_version() argument
713 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) in ufshcd_get_ufs_version()
714 ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba); in ufshcd_get_ufs_version()
716 ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION); in ufshcd_get_ufs_version()
736 static inline bool ufshcd_is_device_present(struct ufs_hba *hba) in ufshcd_is_device_present() argument
738 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & in ufshcd_is_device_present()
759 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utrl_clear() argument
761 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utrl_clear()
762 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); in ufshcd_utrl_clear()
764 ufshcd_writel(hba, ~(1 << pos), in ufshcd_utrl_clear()
773 static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) in ufshcd_utmrl_clear() argument
775 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) in ufshcd_utmrl_clear()
776 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
778 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); in ufshcd_utmrl_clear()
799 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) in ufshcd_get_uic_cmd_result() argument
801 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & in ufshcd_get_uic_cmd_result()
812 static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) in ufshcd_get_dme_attr_val() argument
814 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); in ufshcd_get_dme_attr_val()
874 ufshcd_reset_intr_aggr(struct ufs_hba *hba) in ufshcd_reset_intr_aggr() argument
876 ufshcd_writel(hba, INT_AGGR_ENABLE | in ufshcd_reset_intr_aggr()
888 ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) in ufshcd_config_intr_aggr() argument
890 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | in ufshcd_config_intr_aggr()
900 static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba) in ufshcd_disable_intr_aggr() argument
902 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); in ufshcd_disable_intr_aggr()
911 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) in ufshcd_enable_run_stop_reg() argument
913 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
915 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, in ufshcd_enable_run_stop_reg()
923 static inline void ufshcd_hba_start(struct ufs_hba *hba) in ufshcd_hba_start() argument
927 if (ufshcd_crypto_enable(hba)) in ufshcd_hba_start()
930 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); in ufshcd_hba_start()
939 static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) in ufshcd_is_hba_active() argument
941 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) in ufshcd_is_hba_active()
945 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba) in ufshcd_get_local_unipro_ver() argument
948 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_get_local_unipro_ver()
955 static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba) in ufshcd_is_unipro_pa_params_tuning_req() argument
966 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6) in ufshcd_is_unipro_pa_params_tuning_req()
980 static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up) in ufshcd_set_clk_freq() argument
984 struct list_head *head = &hba->clk_list_head; in ufshcd_set_clk_freq()
997 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1002 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1015 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_set_clk_freq()
1020 trace_ufshcd_clk_scaling(dev_name(hba->dev), in ufshcd_set_clk_freq()
1027 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, in ufshcd_set_clk_freq()
1043 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_clks() argument
1048 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); in ufshcd_scale_clks()
1052 ret = ufshcd_set_clk_freq(hba, scale_up); in ufshcd_scale_clks()
1056 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); in ufshcd_scale_clks()
1058 ufshcd_set_clk_freq(hba, !scale_up); in ufshcd_scale_clks()
1061 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_scale_clks()
1074 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, in ufshcd_is_devfreq_scaling_required() argument
1078 struct list_head *head = &hba->clk_list_head; in ufshcd_is_devfreq_scaling_required()
1100 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, in ufshcd_wait_for_doorbell_clr() argument
1111 ufshcd_hold(hba, false); in ufshcd_wait_for_doorbell_clr()
1112 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1119 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { in ufshcd_wait_for_doorbell_clr()
1124 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1125 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_wait_for_doorbell_clr()
1126 trace_android_vh_ufs_mcq_has_oustanding_reqs(hba, &has_outstanding); in ufshcd_wait_for_doorbell_clr()
1129 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_wait_for_doorbell_clr()
1138 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1150 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1154 dev_err(hba->dev, in ufshcd_wait_for_doorbell_clr()
1160 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_doorbell_clr()
1161 ufshcd_release(hba); in ufshcd_wait_for_doorbell_clr()
1174 static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) in ufshcd_scale_gear() argument
1180 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1183 memcpy(&new_pwr_info, &hba->pwr_info, in ufshcd_scale_gear()
1186 if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear || in ufshcd_scale_gear()
1187 hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) { in ufshcd_scale_gear()
1189 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_scale_gear()
1190 &hba->pwr_info, in ufshcd_scale_gear()
1194 new_pwr_info.gear_tx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1195 new_pwr_info.gear_rx = hba->clk_scaling.min_gear; in ufshcd_scale_gear()
1200 ret = ufshcd_config_pwr_mode(hba, &new_pwr_info); in ufshcd_scale_gear()
1202 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)", in ufshcd_scale_gear()
1204 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx, in ufshcd_scale_gear()
1210 static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) in ufshcd_clock_scaling_prepare() argument
1218 ufshcd_scsi_block_requests(hba); in ufshcd_clock_scaling_prepare()
1220 down_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1222 if (!hba->clk_scaling.is_allowed || in ufshcd_clock_scaling_prepare()
1223 ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { in ufshcd_clock_scaling_prepare()
1225 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_prepare()
1227 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_prepare()
1232 ufshcd_hold(hba, false); in ufshcd_clock_scaling_prepare()
1238 static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up) in ufshcd_clock_scaling_unprepare() argument
1240 up_write(&hba->clk_scaling_lock); in ufshcd_clock_scaling_unprepare()
1243 if (ufshcd_enable_wb_if_scaling_up(hba) && !err) in ufshcd_clock_scaling_unprepare()
1244 ufshcd_wb_toggle(hba, scale_up); in ufshcd_clock_scaling_unprepare()
1248 ufshcd_scsi_unblock_requests(hba); in ufshcd_clock_scaling_unprepare()
1249 ufshcd_release(hba); in ufshcd_clock_scaling_unprepare()
1261 static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) in ufshcd_devfreq_scale() argument
1265 ret = ufshcd_clock_scaling_prepare(hba); in ufshcd_devfreq_scale()
1271 ret = ufshcd_scale_gear(hba, false); in ufshcd_devfreq_scale()
1276 ret = ufshcd_scale_clks(hba, scale_up); in ufshcd_devfreq_scale()
1279 ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1285 ret = ufshcd_scale_gear(hba, true); in ufshcd_devfreq_scale()
1287 ufshcd_scale_clks(hba, false); in ufshcd_devfreq_scale()
1293 ufshcd_clock_scaling_unprepare(hba, ret, scale_up); in ufshcd_devfreq_scale()
1299 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_suspend_work() local
1303 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1304 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_suspend_work()
1305 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1308 hba->clk_scaling.is_suspended = true; in ufshcd_clk_scaling_suspend_work()
1309 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_suspend_work()
1311 __ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend_work()
1316 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_clk_scaling_resume_work() local
1320 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1321 if (!hba->clk_scaling.is_suspended) { in ufshcd_clk_scaling_resume_work()
1322 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1325 hba->clk_scaling.is_suspended = false; in ufshcd_clk_scaling_resume_work()
1326 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_clk_scaling_resume_work()
1328 devfreq_resume_device(hba->devfreq); in ufshcd_clk_scaling_resume_work()
1335 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_target() local
1338 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_target()
1344 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_target()
1347 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); in ufshcd_devfreq_target()
1350 spin_lock_irqsave(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1351 if (ufshcd_eh_in_progress(hba)) { in ufshcd_devfreq_target()
1352 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1356 if (!hba->clk_scaling.active_reqs) in ufshcd_devfreq_target()
1360 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1369 trace_android_vh_ufs_clock_scaling(hba, &force_out, &force_scaling, &scale_up); in ufshcd_devfreq_target()
1372 if (force_out || (!force_scaling && !ufshcd_is_devfreq_scaling_required(hba, scale_up))) { in ufshcd_devfreq_target()
1373 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1377 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); in ufshcd_devfreq_target()
1380 ret = ufshcd_devfreq_scale(hba, scale_up); in ufshcd_devfreq_target()
1382 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), in ufshcd_devfreq_target()
1388 queue_work(hba->clk_scaling.workq, in ufshcd_devfreq_target()
1389 &hba->clk_scaling.suspend_work); in ufshcd_devfreq_target()
1404 static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) in ufshcd_any_tag_in_use() argument
1406 struct request_queue *q = hba->cmd_queue; in ufshcd_any_tag_in_use()
1416 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_devfreq_get_dev_status() local
1417 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_devfreq_get_dev_status()
1419 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_get_dev_status()
1424 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_devfreq_get_dev_status()
1429 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1451 has_outstanding = hba->outstanding_reqs != 0; in ufshcd_devfreq_get_dev_status()
1452 trace_android_vh_ufs_mcq_has_oustanding_reqs(hba, &has_outstanding); in ufshcd_devfreq_get_dev_status()
1461 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_devfreq_get_dev_status()
1465 static int ufshcd_devfreq_init(struct ufs_hba *hba) in ufshcd_devfreq_init() argument
1467 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_init()
1477 dev_pm_opp_add(hba->dev, clki->min_freq, 0); in ufshcd_devfreq_init()
1478 dev_pm_opp_add(hba->dev, clki->max_freq, 0); in ufshcd_devfreq_init()
1480 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1481 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1482 devfreq = devfreq_add_device(hba->dev, in ufshcd_devfreq_init()
1483 &hba->vps->devfreq_profile, in ufshcd_devfreq_init()
1485 &hba->vps->ondemand_data); in ufshcd_devfreq_init()
1488 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); in ufshcd_devfreq_init()
1490 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_init()
1491 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_init()
1495 hba->devfreq = devfreq; in ufshcd_devfreq_init()
1500 static void ufshcd_devfreq_remove(struct ufs_hba *hba) in ufshcd_devfreq_remove() argument
1502 struct list_head *clk_list = &hba->clk_list_head; in ufshcd_devfreq_remove()
1505 if (!hba->devfreq) in ufshcd_devfreq_remove()
1508 devfreq_remove_device(hba->devfreq); in ufshcd_devfreq_remove()
1509 hba->devfreq = NULL; in ufshcd_devfreq_remove()
1512 dev_pm_opp_remove(hba->dev, clki->min_freq); in ufshcd_devfreq_remove()
1513 dev_pm_opp_remove(hba->dev, clki->max_freq); in ufshcd_devfreq_remove()
1516 static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba) in __ufshcd_suspend_clkscaling() argument
1520 devfreq_suspend_device(hba->devfreq); in __ufshcd_suspend_clkscaling()
1521 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1522 hba->clk_scaling.window_start_t = 0; in __ufshcd_suspend_clkscaling()
1523 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_suspend_clkscaling()
1526 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) in ufshcd_suspend_clkscaling() argument
1531 cancel_work_sync(&hba->clk_scaling.suspend_work); in ufshcd_suspend_clkscaling()
1532 cancel_work_sync(&hba->clk_scaling.resume_work); in ufshcd_suspend_clkscaling()
1534 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1535 if (!hba->clk_scaling.is_suspended) { in ufshcd_suspend_clkscaling()
1537 hba->clk_scaling.is_suspended = true; in ufshcd_suspend_clkscaling()
1539 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_suspend_clkscaling()
1542 __ufshcd_suspend_clkscaling(hba); in ufshcd_suspend_clkscaling()
1545 static void ufshcd_resume_clkscaling(struct ufs_hba *hba) in ufshcd_resume_clkscaling() argument
1550 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1551 if (hba->clk_scaling.is_suspended) { in ufshcd_resume_clkscaling()
1553 hba->clk_scaling.is_suspended = false; in ufshcd_resume_clkscaling()
1555 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_resume_clkscaling()
1558 devfreq_resume_device(hba->devfreq); in ufshcd_resume_clkscaling()
1564 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_show() local
1566 return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled); in ufshcd_clkscale_enable_show()
1572 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkscale_enable_store() local
1579 down(&hba->host_sem); in ufshcd_clkscale_enable_store()
1580 if (!ufshcd_is_user_access_allowed(hba)) { in ufshcd_clkscale_enable_store()
1586 if (value == hba->clk_scaling.is_enabled) in ufshcd_clkscale_enable_store()
1589 ufshcd_rpm_get_sync(hba); in ufshcd_clkscale_enable_store()
1590 ufshcd_hold(hba, false); in ufshcd_clkscale_enable_store()
1592 hba->clk_scaling.is_enabled = value; in ufshcd_clkscale_enable_store()
1595 ufshcd_resume_clkscaling(hba); in ufshcd_clkscale_enable_store()
1597 ufshcd_suspend_clkscaling(hba); in ufshcd_clkscale_enable_store()
1598 err = ufshcd_devfreq_scale(hba, true); in ufshcd_clkscale_enable_store()
1600 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", in ufshcd_clkscale_enable_store()
1604 ufshcd_release(hba); in ufshcd_clkscale_enable_store()
1605 ufshcd_rpm_put_sync(hba); in ufshcd_clkscale_enable_store()
1607 up(&hba->host_sem); in ufshcd_clkscale_enable_store()
1611 static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_scaling_sysfs() argument
1613 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show; in ufshcd_init_clk_scaling_sysfs()
1614 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store; in ufshcd_init_clk_scaling_sysfs()
1615 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr); in ufshcd_init_clk_scaling_sysfs()
1616 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable"; in ufshcd_init_clk_scaling_sysfs()
1617 hba->clk_scaling.enable_attr.attr.mode = 0644; in ufshcd_init_clk_scaling_sysfs()
1618 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr)) in ufshcd_init_clk_scaling_sysfs()
1619 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n"); in ufshcd_init_clk_scaling_sysfs()
1622 static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_scaling_sysfs() argument
1624 if (hba->clk_scaling.enable_attr.attr.name) in ufshcd_remove_clk_scaling_sysfs()
1625 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr); in ufshcd_remove_clk_scaling_sysfs()
1628 static void ufshcd_init_clk_scaling(struct ufs_hba *hba) in ufshcd_init_clk_scaling() argument
1632 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_init_clk_scaling()
1635 if (!hba->clk_scaling.min_gear) in ufshcd_init_clk_scaling()
1636 hba->clk_scaling.min_gear = UFS_HS_G1; in ufshcd_init_clk_scaling()
1638 INIT_WORK(&hba->clk_scaling.suspend_work, in ufshcd_init_clk_scaling()
1640 INIT_WORK(&hba->clk_scaling.resume_work, in ufshcd_init_clk_scaling()
1644 hba->host->host_no); in ufshcd_init_clk_scaling()
1645 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name); in ufshcd_init_clk_scaling()
1647 hba->clk_scaling.is_initialized = true; in ufshcd_init_clk_scaling()
1650 static void ufshcd_exit_clk_scaling(struct ufs_hba *hba) in ufshcd_exit_clk_scaling() argument
1652 if (!hba->clk_scaling.is_initialized) in ufshcd_exit_clk_scaling()
1655 ufshcd_remove_clk_scaling_sysfs(hba); in ufshcd_exit_clk_scaling()
1656 destroy_workqueue(hba->clk_scaling.workq); in ufshcd_exit_clk_scaling()
1657 ufshcd_devfreq_remove(hba); in ufshcd_exit_clk_scaling()
1658 hba->clk_scaling.is_initialized = false; in ufshcd_exit_clk_scaling()
1665 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_ungate_work() local
1668 cancel_delayed_work_sync(&hba->clk_gating.gate_work); in ufshcd_ungate_work()
1670 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_ungate_work()
1671 if (hba->clk_gating.state == CLKS_ON) { in ufshcd_ungate_work()
1672 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1676 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_ungate_work()
1677 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_ungate_work()
1678 ufshcd_setup_clocks(hba, true); in ufshcd_ungate_work()
1680 ufshcd_enable_irq(hba); in ufshcd_ungate_work()
1683 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_ungate_work()
1685 hba->clk_gating.is_suspended = true; in ufshcd_ungate_work()
1686 if (ufshcd_is_link_hibern8(hba)) { in ufshcd_ungate_work()
1687 ret = ufshcd_uic_hibern8_exit(hba); in ufshcd_ungate_work()
1689 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufshcd_ungate_work()
1692 ufshcd_set_link_active(hba); in ufshcd_ungate_work()
1694 hba->clk_gating.is_suspended = false; in ufshcd_ungate_work()
1697 ufshcd_scsi_unblock_requests(hba); in ufshcd_ungate_work()
1706 int ufshcd_hold(struct ufs_hba *hba, bool async) in ufshcd_hold() argument
1712 if (!ufshcd_is_clkgating_allowed(hba) || in ufshcd_hold()
1713 !hba->clk_gating.is_initialized) in ufshcd_hold()
1715 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1716 hba->clk_gating.active_reqs++; in ufshcd_hold()
1719 switch (hba->clk_gating.state) { in ufshcd_hold()
1729 if (ufshcd_can_hibern8_during_gating(hba) && in ufshcd_hold()
1730 ufshcd_is_link_hibern8(hba)) { in ufshcd_hold()
1733 hba->clk_gating.active_reqs--; in ufshcd_hold()
1736 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1737 flush_result = flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1738 if (hba->clk_gating.is_suspended && !flush_result) in ufshcd_hold()
1740 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1745 if (cancel_delayed_work(&hba->clk_gating.gate_work)) { in ufshcd_hold()
1746 hba->clk_gating.state = CLKS_ON; in ufshcd_hold()
1747 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1748 hba->clk_gating.state); in ufshcd_hold()
1758 hba->clk_gating.state = REQ_CLKS_ON; in ufshcd_hold()
1759 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_hold()
1760 hba->clk_gating.state); in ufshcd_hold()
1761 if (queue_work(hba->clk_gating.clk_gating_workq, in ufshcd_hold()
1762 &hba->clk_gating.ungate_work)) in ufshcd_hold()
1763 ufshcd_scsi_block_requests(hba); in ufshcd_hold()
1772 hba->clk_gating.active_reqs--; in ufshcd_hold()
1776 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1777 flush_work(&hba->clk_gating.ungate_work); in ufshcd_hold()
1779 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hold()
1782 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n", in ufshcd_hold()
1783 __func__, hba->clk_gating.state); in ufshcd_hold()
1786 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hold()
1794 struct ufs_hba *hba = container_of(work, struct ufs_hba, in ufshcd_gate_work() local
1799 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1806 if (hba->clk_gating.is_suspended || in ufshcd_gate_work()
1807 (hba->clk_gating.state != REQ_CLKS_OFF)) { in ufshcd_gate_work()
1808 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1809 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1810 hba->clk_gating.state); in ufshcd_gate_work()
1814 if (hba->clk_gating.active_reqs in ufshcd_gate_work()
1815 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL in ufshcd_gate_work()
1816 || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks in ufshcd_gate_work()
1817 || hba->active_uic_cmd || hba->uic_async_done) in ufshcd_gate_work()
1820 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1823 if (ufshcd_can_hibern8_during_gating(hba)) { in ufshcd_gate_work()
1824 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_gate_work()
1826 hba->clk_gating.state = CLKS_ON; in ufshcd_gate_work()
1827 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_gate_work()
1829 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1830 hba->clk_gating.state); in ufshcd_gate_work()
1833 ufshcd_set_link_hibern8(hba); in ufshcd_gate_work()
1836 ufshcd_disable_irq(hba); in ufshcd_gate_work()
1838 ufshcd_setup_clocks(hba, false); in ufshcd_gate_work()
1841 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_gate_work()
1851 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_gate_work()
1852 if (hba->clk_gating.state == REQ_CLKS_OFF) { in ufshcd_gate_work()
1853 hba->clk_gating.state = CLKS_OFF; in ufshcd_gate_work()
1854 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_gate_work()
1855 hba->clk_gating.state); in ufshcd_gate_work()
1858 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_gate_work()
1864 static void __ufshcd_release(struct ufs_hba *hba) in __ufshcd_release() argument
1866 if (!ufshcd_is_clkgating_allowed(hba)) in __ufshcd_release()
1869 hba->clk_gating.active_reqs--; in __ufshcd_release()
1871 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || in __ufshcd_release()
1872 hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL || in __ufshcd_release()
1873 hba->outstanding_tasks || !hba->clk_gating.is_initialized || in __ufshcd_release()
1874 hba->active_uic_cmd || hba->uic_async_done || in __ufshcd_release()
1875 hba->clk_gating.state == CLKS_OFF) in __ufshcd_release()
1878 hba->clk_gating.state = REQ_CLKS_OFF; in __ufshcd_release()
1879 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); in __ufshcd_release()
1880 queue_delayed_work(hba->clk_gating.clk_gating_workq, in __ufshcd_release()
1881 &hba->clk_gating.gate_work, in __ufshcd_release()
1882 msecs_to_jiffies(hba->clk_gating.delay_ms)); in __ufshcd_release()
1885 void ufshcd_release(struct ufs_hba *hba) in ufshcd_release() argument
1889 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_release()
1890 __ufshcd_release(hba); in ufshcd_release()
1891 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_release()
1898 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_show() local
1900 return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms); in ufshcd_clkgate_delay_show()
1906 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_delay_store() local
1912 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1913 hba->clk_gating.delay_ms = value; in ufshcd_clkgate_delay_store()
1914 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_delay_store()
1921 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_show() local
1923 return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled); in ufshcd_clkgate_enable_show()
1929 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_clkgate_enable_store() local
1938 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1939 if (value == hba->clk_gating.is_enabled) in ufshcd_clkgate_enable_store()
1943 __ufshcd_release(hba); in ufshcd_clkgate_enable_store()
1945 hba->clk_gating.active_reqs++; in ufshcd_clkgate_enable_store()
1947 hba->clk_gating.is_enabled = value; in ufshcd_clkgate_enable_store()
1949 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clkgate_enable_store()
1953 static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_init_clk_gating_sysfs() argument
1955 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show; in ufshcd_init_clk_gating_sysfs()
1956 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store; in ufshcd_init_clk_gating_sysfs()
1957 sysfs_attr_init(&hba->clk_gating.delay_attr.attr); in ufshcd_init_clk_gating_sysfs()
1958 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms"; in ufshcd_init_clk_gating_sysfs()
1959 hba->clk_gating.delay_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1960 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr)) in ufshcd_init_clk_gating_sysfs()
1961 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n"); in ufshcd_init_clk_gating_sysfs()
1963 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show; in ufshcd_init_clk_gating_sysfs()
1964 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store; in ufshcd_init_clk_gating_sysfs()
1965 sysfs_attr_init(&hba->clk_gating.enable_attr.attr); in ufshcd_init_clk_gating_sysfs()
1966 hba->clk_gating.enable_attr.attr.name = "clkgate_enable"; in ufshcd_init_clk_gating_sysfs()
1967 hba->clk_gating.enable_attr.attr.mode = 0644; in ufshcd_init_clk_gating_sysfs()
1968 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr)) in ufshcd_init_clk_gating_sysfs()
1969 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n"); in ufshcd_init_clk_gating_sysfs()
1972 static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba) in ufshcd_remove_clk_gating_sysfs() argument
1974 if (hba->clk_gating.delay_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1975 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); in ufshcd_remove_clk_gating_sysfs()
1976 if (hba->clk_gating.enable_attr.attr.name) in ufshcd_remove_clk_gating_sysfs()
1977 device_remove_file(hba->dev, &hba->clk_gating.enable_attr); in ufshcd_remove_clk_gating_sysfs()
1980 static void ufshcd_init_clk_gating(struct ufs_hba *hba) in ufshcd_init_clk_gating() argument
1984 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_init_clk_gating()
1987 hba->clk_gating.state = CLKS_ON; in ufshcd_init_clk_gating()
1989 hba->clk_gating.delay_ms = 150; in ufshcd_init_clk_gating()
1990 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work); in ufshcd_init_clk_gating()
1991 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work); in ufshcd_init_clk_gating()
1994 hba->host->host_no); in ufshcd_init_clk_gating()
1995 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name, in ufshcd_init_clk_gating()
1998 ufshcd_init_clk_gating_sysfs(hba); in ufshcd_init_clk_gating()
2000 hba->clk_gating.is_enabled = true; in ufshcd_init_clk_gating()
2001 hba->clk_gating.is_initialized = true; in ufshcd_init_clk_gating()
2004 static void ufshcd_exit_clk_gating(struct ufs_hba *hba) in ufshcd_exit_clk_gating() argument
2006 if (!hba->clk_gating.is_initialized) in ufshcd_exit_clk_gating()
2009 ufshcd_remove_clk_gating_sysfs(hba); in ufshcd_exit_clk_gating()
2012 ufshcd_hold(hba, false); in ufshcd_exit_clk_gating()
2013 hba->clk_gating.is_initialized = false; in ufshcd_exit_clk_gating()
2014 ufshcd_release(hba); in ufshcd_exit_clk_gating()
2016 destroy_workqueue(hba->clk_gating.clk_gating_workq); in ufshcd_exit_clk_gating()
2020 void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_start_busy() argument
2026 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_start_busy()
2029 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2030 if (!hba->clk_scaling.active_reqs++) in ufshcd_clk_scaling_start_busy()
2033 if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { in ufshcd_clk_scaling_start_busy()
2034 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2039 queue_work(hba->clk_scaling.workq, in ufshcd_clk_scaling_start_busy()
2040 &hba->clk_scaling.resume_work); in ufshcd_clk_scaling_start_busy()
2042 if (!hba->clk_scaling.window_start_t) { in ufshcd_clk_scaling_start_busy()
2043 hba->clk_scaling.window_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2044 hba->clk_scaling.tot_busy_t = 0; in ufshcd_clk_scaling_start_busy()
2045 hba->clk_scaling.is_busy_started = false; in ufshcd_clk_scaling_start_busy()
2048 if (!hba->clk_scaling.is_busy_started) { in ufshcd_clk_scaling_start_busy()
2049 hba->clk_scaling.busy_start_t = curr_t; in ufshcd_clk_scaling_start_busy()
2050 hba->clk_scaling.is_busy_started = true; in ufshcd_clk_scaling_start_busy()
2052 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_start_busy()
2056 void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) in ufshcd_clk_scaling_update_busy() argument
2058 struct ufs_clk_scaling *scaling = &hba->clk_scaling; in ufshcd_clk_scaling_update_busy()
2062 if (!ufshcd_is_clkscaling_supported(hba)) in ufshcd_clk_scaling_update_busy()
2065 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2066 hba->clk_scaling.active_reqs--; in ufshcd_clk_scaling_update_busy()
2067 has_outstanding = hba->outstanding_reqs != 0; in ufshcd_clk_scaling_update_busy()
2068 trace_android_vh_ufs_mcq_has_oustanding_reqs(hba, &has_outstanding); in ufshcd_clk_scaling_update_busy()
2075 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clk_scaling_update_busy()
2089 static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, in ufshcd_should_inform_monitor() argument
2092 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_should_inform_monitor()
2096 ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp)); in ufshcd_should_inform_monitor()
2099 static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_start_monitor() argument
2104 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_start_monitor()
2105 if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) in ufshcd_start_monitor()
2106 hba->monitor.busy_start_ts[dir] = ktime_get(); in ufshcd_start_monitor()
2107 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_start_monitor()
2110 static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_update_monitor() argument
2115 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_update_monitor()
2116 if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { in ufshcd_update_monitor()
2118 struct ufs_hba_monitor *m = &hba->monitor; in ufshcd_update_monitor()
2139 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_update_monitor()
2148 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) in ufshcd_send_command() argument
2150 struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; in ufshcd_send_command()
2153 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_send_command()
2154 trace_android_vh_ufs_mcq_send_command(hba, task_tag); in ufshcd_send_command()
2160 trace_android_vh_ufs_send_command(hba, lrbp); in ufshcd_send_command()
2161 ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); in ufshcd_send_command()
2162 ufshcd_clk_scaling_start_busy(hba); in ufshcd_send_command()
2163 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in ufshcd_send_command()
2164 ufshcd_start_monitor(hba, lrbp); in ufshcd_send_command()
2166 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_send_command()
2167 if (hba->vops && hba->vops->setup_xfer_req) in ufshcd_send_command()
2168 hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); in ufshcd_send_command()
2169 __set_bit(task_tag, &hba->outstanding_reqs); in ufshcd_send_command()
2170 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_send_command()
2171 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_send_command()
2203 int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_copy_query_response() argument
2205 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_copy_query_response()
2210 if (hba->dev_cmd.query.descriptor && in ufshcd_copy_query_response()
2221 hba->dev_cmd.query.request.upiu_req.length); in ufshcd_copy_query_response()
2223 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len); in ufshcd_copy_query_response()
2225 dev_warn(hba->dev, in ufshcd_copy_query_response()
2241 static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) in ufshcd_hba_capabilities() argument
2245 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); in ufshcd_hba_capabilities()
2248 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; in ufshcd_hba_capabilities()
2249 hba->nutmrs = in ufshcd_hba_capabilities()
2250 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; in ufshcd_hba_capabilities()
2251 hba->reserved_slot = hba->nutrs - 1; in ufshcd_hba_capabilities()
2253 trace_android_vh_ufs_mcq_hba_capabilities(hba, &err); in ufshcd_hba_capabilities()
2258 err = ufshcd_hba_init_crypto_capabilities(hba); in ufshcd_hba_capabilities()
2260 dev_err(hba->dev, "crypto setup failed\n"); in ufshcd_hba_capabilities()
2271 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) in ufshcd_ready_for_uic_cmd() argument
2273 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) in ufshcd_ready_for_uic_cmd()
2286 static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) in ufshcd_get_upmcrs() argument
2288 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; in ufshcd_get_upmcrs()
2297 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_dispatch_uic_cmd() argument
2299 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_dispatch_uic_cmd()
2301 WARN_ON(hba->active_uic_cmd); in ufshcd_dispatch_uic_cmd()
2303 hba->active_uic_cmd = uic_cmd; in ufshcd_dispatch_uic_cmd()
2306 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); in ufshcd_dispatch_uic_cmd()
2307 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); in ufshcd_dispatch_uic_cmd()
2308 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); in ufshcd_dispatch_uic_cmd()
2310 ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND); in ufshcd_dispatch_uic_cmd()
2313 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, in ufshcd_dispatch_uic_cmd()
2325 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_wait_for_uic_cmd() argument
2330 lockdep_assert_held(&hba->uic_cmd_mutex); in ufshcd_wait_for_uic_cmd()
2337 dev_err(hba->dev, in ufshcd_wait_for_uic_cmd()
2342 dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n", in ufshcd_wait_for_uic_cmd()
2348 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2349 hba->active_uic_cmd = NULL; in ufshcd_wait_for_uic_cmd()
2350 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_uic_cmd()
2364 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, in __ufshcd_send_uic_cmd() argument
2367 lockdep_assert_held(&hba->uic_cmd_mutex); in __ufshcd_send_uic_cmd()
2369 if (!ufshcd_ready_for_uic_cmd(hba)) { in __ufshcd_send_uic_cmd()
2370 dev_err(hba->dev, in __ufshcd_send_uic_cmd()
2379 ufshcd_dispatch_uic_cmd(hba, uic_cmd); in __ufshcd_send_uic_cmd()
2391 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) in ufshcd_send_uic_cmd() argument
2395 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) in ufshcd_send_uic_cmd()
2398 ufshcd_hold(hba, false); in ufshcd_send_uic_cmd()
2399 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2400 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_send_uic_cmd()
2402 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true); in ufshcd_send_uic_cmd()
2404 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); in ufshcd_send_uic_cmd()
2406 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_send_uic_cmd()
2408 ufshcd_release(hba); in ufshcd_send_uic_cmd()
2419 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_map_sg() argument
2435 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) in ufshcd_map_sg()
2437 cpu_to_le16(sg_segments * hba->sg_entry_size); in ufshcd_map_sg()
2452 prd = (void *)prd + hba->sg_entry_size; in ufshcd_map_sg()
2459 trace_android_vh_ufs_fill_prdt(hba, lrbp, sg_segments, &err); in ufshcd_map_sg()
2468 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_enable_intr() argument
2470 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2472 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_enable_intr()
2480 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_enable_intr()
2488 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) in ufshcd_disable_intr() argument
2490 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
2492 if (hba->ufs_version == ufshci_version(1, 0)) { in ufshcd_disable_intr()
2502 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); in ufshcd_disable_intr()
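(The matches at source lines 2468-2502 show the interrupt-mask helpers doing a read-modify-write of REG_INTERRUPT_ENABLE: set the requested bits to enable, clear them to disable, leaving the other bits untouched (with an extra quirk path for UFSHCI 1.0 not shown here). A minimal sketch of that set/clear-mask pattern over a plain variable follows; enable_intr(), disable_intr() and int_enable_reg are illustrative names, not the driver's accessors.)

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t int_enable_reg;	/* stands in for REG_INTERRUPT_ENABLE */

	static void enable_intr(uint32_t intrs)
	{
		int_enable_reg |= intrs;	/* set requested bits */
	}

	static void disable_intr(uint32_t intrs)
	{
		int_enable_reg &= ~intrs;	/* clear requested bits, keep the rest */
	}

	int main(void)
	{
		enable_intr(0x5);
		disable_intr(0x1);
		printf("enable mask: 0x%x\n", int_enable_reg);	/* prints 0x4 */
		return 0;
	}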
2594 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, in ufshcd_prepare_utp_query_req_upiu() argument
2598 struct ufs_query *query = &hba->dev_cmd.query; in ufshcd_prepare_utp_query_req_upiu()
2649 static int ufshcd_compose_devman_upiu(struct ufs_hba *hba, in ufshcd_compose_devman_upiu() argument
2655 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_compose_devman_upiu()
2661 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) in ufshcd_compose_devman_upiu()
2662 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags); in ufshcd_compose_devman_upiu()
2663 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) in ufshcd_compose_devman_upiu()
2677 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_comp_scsi_upiu() argument
2682 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_comp_scsi_upiu()
2720 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) in ufshcd_init_lrb() argument
2722 struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + in ufshcd_init_lrb()
2723 i * sizeof_utp_transfer_cmd_desc(hba); in ufshcd_init_lrb()
2724 struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; in ufshcd_init_lrb()
2725 dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + in ufshcd_init_lrb()
2726 i * sizeof_utp_transfer_cmd_desc(hba); in ufshcd_init_lrb()
2732 lrb->utrd_dma_addr = hba->utrdl_dma_addr + in ufshcd_init_lrb()
2751 struct ufs_hba *hba = shost_priv(host); in ufshcd_queuecommand() local
2756 trace_android_vh_ufs_mcq_map_tag(hba, in ufshcd_queuecommand()
2761 if (!down_read_trylock(&hba->clk_scaling_lock)) in ufshcd_queuecommand()
2770 switch (hba->ufshcd_state) { in ufshcd_queuecommand()
2785 if (hba->pm_op_in_progress) { in ufshcd_queuecommand()
2786 hba->force_reset = true; in ufshcd_queuecommand()
2801 hba->req_abort_count = 0; in ufshcd_queuecommand()
2803 err = ufshcd_hold(hba, true); in ufshcd_queuecommand()
2808 WARN_ON(ufshcd_is_clkgating_allowed(hba) && in ufshcd_queuecommand()
2809 (hba->clk_gating.state != CLKS_ON)); in ufshcd_queuecommand()
2811 lrbp = &hba->lrb[tag]; in ufshcd_queuecommand()
2818 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false; in ufshcd_queuecommand()
2820 trace_android_vh_ufs_mcq_set_sqid(hba, scsi_cmd_to_rq(cmd)->mq_hctx->queue_num, lrbp); in ufshcd_queuecommand()
2824 trace_android_vh_ufs_prepare_command(hba, scsi_cmd_to_rq(cmd), lrbp, in ufshcd_queuecommand()
2828 ufshcd_release(hba); in ufshcd_queuecommand()
2834 ufshpb_prep(hba, lrbp); in ufshcd_queuecommand()
2836 ufshcd_comp_scsi_upiu(hba, lrbp); in ufshcd_queuecommand()
2838 err = ufshcd_map_sg(hba, lrbp); in ufshcd_queuecommand()
2841 ufshcd_release(hba); in ufshcd_queuecommand()
2845 ufshcd_send_command(hba, tag); in ufshcd_queuecommand()
2850 up_read(&hba->clk_scaling_lock); in ufshcd_queuecommand()
2855 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_queuecommand()
2856 ufshcd_schedule_eh_work(hba); in ufshcd_queuecommand()
2857 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_queuecommand()
2863 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, in ufshcd_compose_dev_cmd() argument
2873 hba->dev_cmd.type = cmd_type; in ufshcd_compose_dev_cmd()
2875 return ufshcd_compose_devman_upiu(hba, lrbp); in ufshcd_compose_dev_cmd()
2879 ufshcd_clear_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_cmd() argument
2885 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_clear_cmd()
2886 trace_android_vh_ufs_mcq_clear_cmd(hba, tag, &err); in ufshcd_clear_cmd()
2891 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2892 ufshcd_utrl_clear(hba, tag); in ufshcd_clear_cmd()
2893 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_cmd()
2899 err = ufshcd_wait_for_register(hba, in ufshcd_clear_cmd()
2907 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_check_query_response() argument
2909 struct ufs_query_res *query_res = &hba->dev_cmd.query.response; in ufshcd_check_query_response()
2923 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_dev_cmd_completion() argument
2928 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_dev_cmd_completion()
2933 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) { in ufshcd_dev_cmd_completion()
2935 dev_err(hba->dev, "%s: unexpected response %x\n", in ufshcd_dev_cmd_completion()
2940 err = ufshcd_check_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2942 err = ufshcd_copy_query_response(hba, lrbp); in ufshcd_dev_cmd_completion()
2947 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", in ufshcd_dev_cmd_completion()
2952 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", in ufshcd_dev_cmd_completion()
2960 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, in ufshcd_wait_for_dev_cmd() argument
2968 time_left = wait_for_completion_timeout(hba->dev_cmd.complete, in ufshcd_wait_for_dev_cmd()
2971 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2972 hba->dev_cmd.complete = NULL; in ufshcd_wait_for_dev_cmd()
2976 err = ufshcd_dev_cmd_completion(hba, lrbp); in ufshcd_wait_for_dev_cmd()
2978 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_wait_for_dev_cmd()
2982 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", in ufshcd_wait_for_dev_cmd()
2984 if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) in ufshcd_wait_for_dev_cmd()
2992 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
2993 outstanding_reqs = &hba->outstanding_reqs; in ufshcd_wait_for_dev_cmd()
2994 trace_android_vh_ufs_mcq_get_outstanding_reqs(hba, in ufshcd_wait_for_dev_cmd()
2997 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_wait_for_dev_cmd()
3012 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, in ufshcd_exec_dev_cmd() argument
3016 const u32 tag = hba->reserved_slot; in ufshcd_exec_dev_cmd()
3021 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_exec_dev_cmd()
3023 down_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
3025 lrbp = &hba->lrb[tag]; in ufshcd_exec_dev_cmd()
3027 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); in ufshcd_exec_dev_cmd()
3031 hba->dev_cmd.complete = &wait; in ufshcd_exec_dev_cmd()
3033 trace_android_vh_ufs_mcq_set_sqid(hba, 0, lrbp); in ufshcd_exec_dev_cmd()
3035 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); in ufshcd_exec_dev_cmd()
3037 ufshcd_send_command(hba, tag); in ufshcd_exec_dev_cmd()
3038 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); in ufshcd_exec_dev_cmd()
3039 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_exec_dev_cmd()
3043 up_read(&hba->clk_scaling_lock); in ufshcd_exec_dev_cmd()
3057 static inline void ufshcd_init_query(struct ufs_hba *hba, in ufshcd_init_query() argument
3061 *request = &hba->dev_cmd.query.request; in ufshcd_init_query()
3062 *response = &hba->dev_cmd.query.response; in ufshcd_init_query()
3071 int ufshcd_query_flag_retry(struct ufs_hba *hba, in ufshcd_query_flag_retry() argument
3078 ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res); in ufshcd_query_flag_retry()
3080 dev_dbg(hba->dev, in ufshcd_query_flag_retry()
3088 dev_err(hba->dev, in ufshcd_query_flag_retry()
3105 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_flag() argument
3113 BUG_ON(!hba); in ufshcd_query_flag()
3115 ufshcd_hold(hba, false); in ufshcd_query_flag()
3116 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3117 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_flag()
3130 dev_err(hba->dev, "%s: Invalid argument for read request\n", in ufshcd_query_flag()
3137 dev_err(hba->dev, in ufshcd_query_flag()
3144 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout); in ufshcd_query_flag()
3147 dev_err(hba->dev, in ufshcd_query_flag()
3158 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_flag()
3159 ufshcd_release(hba); in ufshcd_query_flag()
3175 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, in ufshcd_query_attr() argument
3182 BUG_ON(!hba); in ufshcd_query_attr()
3185 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n", in ufshcd_query_attr()
3190 ufshcd_hold(hba, false); in ufshcd_query_attr()
3192 mutex_lock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3193 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in ufshcd_query_attr()
3205 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n", in ufshcd_query_attr()
3211 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in ufshcd_query_attr()
3214 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in ufshcd_query_attr()
3222 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_query_attr()
3223 ufshcd_release(hba); in ufshcd_query_attr()
3241 int ufshcd_query_attr_retry(struct ufs_hba *hba, in ufshcd_query_attr_retry() argument
3249 ret = ufshcd_query_attr(hba, opcode, idn, index, in ufshcd_query_attr_retry()
3252 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n", in ufshcd_query_attr_retry()
3259 dev_err(hba->dev, in ufshcd_query_attr_retry()
3266 static int __ufshcd_query_descriptor(struct ufs_hba *hba, in __ufshcd_query_descriptor() argument
3274 BUG_ON(!hba); in __ufshcd_query_descriptor()
3277 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n", in __ufshcd_query_descriptor()
3283 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", in __ufshcd_query_descriptor()
3288 ufshcd_hold(hba, false); in __ufshcd_query_descriptor()
3290 mutex_lock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3291 ufshcd_init_query(hba, &request, &response, opcode, idn, index, in __ufshcd_query_descriptor()
3293 hba->dev_cmd.query.descriptor = desc_buf; in __ufshcd_query_descriptor()
3304 dev_err(hba->dev, in __ufshcd_query_descriptor()
3311 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT); in __ufshcd_query_descriptor()
3314 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n", in __ufshcd_query_descriptor()
3322 hba->dev_cmd.query.descriptor = NULL; in __ufshcd_query_descriptor()
3323 mutex_unlock(&hba->dev_cmd.lock); in __ufshcd_query_descriptor()
3324 ufshcd_release(hba); in __ufshcd_query_descriptor()
3342 int ufshcd_query_descriptor_retry(struct ufs_hba *hba, in ufshcd_query_descriptor_retry() argument
3352 err = __ufshcd_query_descriptor(hba, opcode, idn, index, in ufshcd_query_descriptor_retry()
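ufshcd_query_flag_retry(), ufshcd_query_attr_retry() and ufshcd_query_descriptor_retry() are the retrying front-ends for UFS query UPIUs; each takes the hba, a query opcode, an IDN, an index and an in/out value. A minimal caller sketch in the same driver context, assuming the ufshcd.h/ufs.h headers of this file; the specific IDNs are illustrative only:

    /* Illustrative sketch, not part of the driver: read back one flag and
     * one attribute through the retrying query helpers listed above. */
    static int example_query_device(struct ufs_hba *hba)
    {
            bool fdevice_init = false;
            u32 ref_clk = 0;
            int err;

            /* Read-back of the fDeviceInit flag at index 0. */
            err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
                                          QUERY_FLAG_IDN_FDEVICEINIT, 0,
                                          &fdevice_init);
            if (err)
                    return err;

            /* Read the bRefClkFreq attribute (index 0, selector 0). */
            return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
                                           QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0,
                                           &ref_clk);
    }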
3368 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, in ufshcd_map_desc_id_to_length() argument
3375 *desc_len = hba->desc_size[desc_id]; in ufshcd_map_desc_id_to_length()
3379 static void ufshcd_update_desc_length(struct ufs_hba *hba, in ufshcd_update_desc_length() argument
3383 if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && in ufshcd_update_desc_length()
3390 hba->desc_size[desc_id] = desc_len; in ufshcd_update_desc_length()
3404 int ufshcd_read_desc_param(struct ufs_hba *hba, in ufshcd_read_desc_param() argument
3421 ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); in ufshcd_read_desc_param()
3423 dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); in ufshcd_read_desc_param()
3428 dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", in ufshcd_read_desc_param()
3444 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, in ufshcd_read_desc_param()
3449 …dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret … in ufshcd_read_desc_param()
3456 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", in ufshcd_read_desc_param()
3464 ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); in ufshcd_read_desc_param()
3514 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, in ufshcd_read_string_desc() argument
3528 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, in ufshcd_read_string_desc()
3531 dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", in ufshcd_read_string_desc()
3538 dev_dbg(hba->dev, "String Desc is of zero length\n"); in ufshcd_read_string_desc()
3593 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, in ufshcd_read_unit_desc_param() argument
3603 if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset)) in ufshcd_read_unit_desc_param()
3606 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, in ufshcd_read_unit_desc_param()
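ufshcd_read_desc_param() sits on top of the query path: it resolves the cached descriptor length via ufshcd_map_desc_id_to_length(), validates the requested offset, issues UPIU_QUERY_OPCODE_READ_DESC through ufshcd_query_descriptor_retry(), and copies back only the bytes asked for; ufshcd_read_unit_desc_param() is the per-LUN wrapper. A hedged single-byte read, where UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to be the bLUQueueDepth offset constant from ufs.h:

    /* Illustrative sketch, not part of the driver: one-byte read of the
     * LU queue depth from the unit descriptor of the given LUN. */
    static int example_read_lu_queue_depth(struct ufs_hba *hba, u8 lun,
                                           u8 *qdepth)
    {
            return ufshcd_read_unit_desc_param(hba, lun,
                                               UNIT_DESC_PARAM_LU_Q_DEPTH,
                                               qdepth, sizeof(*qdepth));
    }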
3610 static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba) in ufshcd_get_ref_clk_gating_wait() argument
3615 if (hba->dev_info.wspecversion >= 0x300) { in ufshcd_get_ref_clk_gating_wait()
3616 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ref_clk_gating_wait()
3620 dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3625 dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n", in ufshcd_get_ref_clk_gating_wait()
3629 hba->dev_info.clk_gating_wait_us = gating_wait; in ufshcd_get_ref_clk_gating_wait()
3648 static int ufshcd_memory_alloc(struct ufs_hba *hba) in ufshcd_memory_alloc() argument
3651 int pool_size = hba->nutrs; in ufshcd_memory_alloc()
3653 trace_android_vh_ufs_mcq_max_tag(hba, &pool_size); in ufshcd_memory_alloc()
3656 ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * pool_size); in ufshcd_memory_alloc()
3657 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3659 &hba->ucdl_dma_addr, in ufshcd_memory_alloc()
3668 if (!hba->ucdl_base_addr || in ufshcd_memory_alloc()
3669 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3670 dev_err(hba->dev, in ufshcd_memory_alloc()
3680 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3682 &hba->utrdl_dma_addr, in ufshcd_memory_alloc()
3684 if (!hba->utrdl_base_addr || in ufshcd_memory_alloc()
3685 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3686 dev_err(hba->dev, in ufshcd_memory_alloc()
3695 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; in ufshcd_memory_alloc()
3696 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, in ufshcd_memory_alloc()
3698 &hba->utmrdl_dma_addr, in ufshcd_memory_alloc()
3700 if (!hba->utmrdl_base_addr || in ufshcd_memory_alloc()
3701 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { in ufshcd_memory_alloc()
3702 dev_err(hba->dev, in ufshcd_memory_alloc()
3708 hba->lrb = devm_kcalloc(hba->dev, in ufshcd_memory_alloc()
3711 if (!hba->lrb) { in ufshcd_memory_alloc()
3712 dev_err(hba->dev, "LRB Memory allocation failed\n"); in ufshcd_memory_alloc()
3733 static void ufshcd_host_memory_configure(struct ufs_hba *hba) in ufshcd_host_memory_configure() argument
3742 int pool_size = hba->nutrs; in ufshcd_host_memory_configure()
3744 trace_android_vh_ufs_mcq_max_tag(hba, &pool_size); in ufshcd_host_memory_configure()
3746 utrdlp = hba->utrdl_base_addr; in ufshcd_host_memory_configure()
3753 cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); in ufshcd_host_memory_configure()
3754 cmd_desc_dma_addr = hba->ucdl_dma_addr; in ufshcd_host_memory_configure()
3766 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { in ufshcd_host_memory_configure()
3782 ufshcd_init_lrb(hba, &hba->lrb[i], i); in ufshcd_host_memory_configure()
3797 static int ufshcd_dme_link_startup(struct ufs_hba *hba) in ufshcd_dme_link_startup() argument
3804 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_link_startup()
3806 dev_dbg(hba->dev, in ufshcd_dme_link_startup()
3819 static int ufshcd_dme_reset(struct ufs_hba *hba) in ufshcd_dme_reset() argument
3826 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_reset()
3828 dev_err(hba->dev, in ufshcd_dme_reset()
3834 int ufshcd_dme_configure_adapt(struct ufs_hba *hba, in ufshcd_dme_configure_adapt() argument
3843 ret = ufshcd_dme_set(hba, in ufshcd_dme_configure_adapt()
3858 static int ufshcd_dme_enable(struct ufs_hba *hba) in ufshcd_dme_enable() argument
3865 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_enable()
3867 dev_err(hba->dev, in ufshcd_dme_enable()
3873 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) in ufshcd_add_delay_before_dme_cmd() argument
3878 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)) in ufshcd_add_delay_before_dme_cmd()
3885 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) { in ufshcd_add_delay_before_dme_cmd()
3891 hba->last_dme_cmd_tstamp)); in ufshcd_add_delay_before_dme_cmd()
3914 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_set_attr() argument
3934 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_set_attr()
3936 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", in ufshcd_dme_set_attr()
3941 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", in ufshcd_dme_set_attr()
3958 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, in ufshcd_dme_get_attr() argument
3973 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) { in ufshcd_dme_get_attr()
3974 orig_pwr_info = hba->pwr_info; in ufshcd_dme_get_attr()
3989 ret = ufshcd_change_power_mode(hba, &temp_pwr_info); in ufshcd_dme_get_attr()
4001 ret = ufshcd_send_uic_cmd(hba, &uic_cmd); in ufshcd_dme_get_attr()
4003 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", in ufshcd_dme_get_attr()
4008 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", in ufshcd_dme_get_attr()
4015 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE) in ufshcd_dme_get_attr()
4017 ufshcd_change_power_mode(hba, &orig_pwr_info); in ufshcd_dme_get_attr()
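ufshcd_dme_set_attr() and ufshcd_dme_get_attr() wrap the UIC DME_SET/DME_GET commands with retries and, for peer reads on hosts with UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE, a temporary drop to a slow power mode around the access. Most callers go through the ufshcd_dme_get()/ufshcd_dme_peer_get()/ufshcd_dme_set() convenience wrappers with UIC_ARG_MIB-encoded attribute selectors, as in this sketch; the attribute choices are illustrative:

    /* Illustrative sketch, not part of the driver: read one local and one
     * peer UniPro attribute via the DME wrappers used throughout this file. */
    static int example_dme_reads(struct ufs_hba *hba)
    {
            u32 granularity = 0;
            u32 peer_rx_hs_gear = 0;
            int err;

            /* Local DME_GET of PA_GRANULARITY. */
            err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                 &granularity);
            if (err)
                    return err;

            /* Peer DME_GET of PA_MAXRXHSGEAR, as the max-power-mode probe
             * above does. */
            return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
                                       &peer_rx_hs_gear);
    }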
4039 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) in ufshcd_uic_pwr_ctrl() argument
4047 mutex_lock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4048 ufshcd_add_delay_before_dme_cmd(hba); in ufshcd_uic_pwr_ctrl()
4050 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4051 if (ufshcd_is_link_broken(hba)) { in ufshcd_uic_pwr_ctrl()
4055 hba->uic_async_done = &uic_async_done; in ufshcd_uic_pwr_ctrl()
4056 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) { in ufshcd_uic_pwr_ctrl()
4057 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4065 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4066 ret = __ufshcd_send_uic_cmd(hba, cmd, false); in ufshcd_uic_pwr_ctrl()
4068 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4074 if (!wait_for_completion_timeout(hba->uic_async_done, in ufshcd_uic_pwr_ctrl()
4076 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4081 dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n", in ufshcd_uic_pwr_ctrl()
4091 status = ufshcd_get_upmcrs(hba); in ufshcd_uic_pwr_ctrl()
4093 dev_err(hba->dev, in ufshcd_uic_pwr_ctrl()
4100 ufshcd_print_host_state(hba); in ufshcd_uic_pwr_ctrl()
4101 ufshcd_print_pwr_info(hba); in ufshcd_uic_pwr_ctrl()
4102 ufshcd_print_evt_hist(hba); in ufshcd_uic_pwr_ctrl()
4105 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4106 hba->active_uic_cmd = NULL; in ufshcd_uic_pwr_ctrl()
4107 hba->uic_async_done = NULL; in ufshcd_uic_pwr_ctrl()
4109 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); in ufshcd_uic_pwr_ctrl()
4111 ufshcd_set_link_broken(hba); in ufshcd_uic_pwr_ctrl()
4112 ufshcd_schedule_eh_work(hba); in ufshcd_uic_pwr_ctrl()
4115 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_uic_pwr_ctrl()
4116 mutex_unlock(&hba->uic_cmd_mutex); in ufshcd_uic_pwr_ctrl()
4129 int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) in ufshcd_uic_change_pwr_mode() argument
4134 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) { in ufshcd_uic_change_pwr_mode()
4135 ret = ufshcd_dme_set(hba, in ufshcd_uic_change_pwr_mode()
4138 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n", in ufshcd_uic_change_pwr_mode()
4147 ufshcd_hold(hba, false); in ufshcd_uic_change_pwr_mode()
4148 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_change_pwr_mode()
4149 ufshcd_release(hba); in ufshcd_uic_change_pwr_mode()
4156 int ufshcd_link_recovery(struct ufs_hba *hba) in ufshcd_link_recovery() argument
4161 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4162 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_link_recovery()
4163 ufshcd_set_eh_in_progress(hba); in ufshcd_link_recovery()
4164 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4167 ufshcd_device_reset(hba); in ufshcd_link_recovery()
4169 ret = ufshcd_host_reset_and_restore(hba); in ufshcd_link_recovery()
4171 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_link_recovery()
4173 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_link_recovery()
4174 ufshcd_clear_eh_in_progress(hba); in ufshcd_link_recovery()
4175 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_link_recovery()
4178 dev_err(hba->dev, "%s: link recovery failed, err %d", in ufshcd_link_recovery()
4185 int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) in ufshcd_uic_hibern8_enter() argument
4191 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE); in ufshcd_uic_hibern8_enter()
4194 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_enter()
4195 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter", in ufshcd_uic_hibern8_enter()
4199 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", in ufshcd_uic_hibern8_enter()
4202 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, in ufshcd_uic_hibern8_enter()
4209 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) in ufshcd_uic_hibern8_exit() argument
4215 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE); in ufshcd_uic_hibern8_exit()
4218 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); in ufshcd_uic_hibern8_exit()
4219 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit", in ufshcd_uic_hibern8_exit()
4223 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n", in ufshcd_uic_hibern8_exit()
4226 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, in ufshcd_uic_hibern8_exit()
4228 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get(); in ufshcd_uic_hibern8_exit()
4229 hba->ufs_stats.hibern8_exit_cnt++; in ufshcd_uic_hibern8_exit()
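ufshcd_uic_hibern8_enter() and ufshcd_uic_hibern8_exit() bracket the DME_HIBER_ENTER/EXIT UIC commands with the vendor hibern8_notify callbacks, profiling traces and, on exit, the hibern8 statistics update. A hedged sketch of a caller cycling the link through HIBERN8 while it is otherwise idle (the suspend/resume paths are the real users):

    /* Illustrative sketch, not part of the driver: put the UniPro link
     * into HIBERN8 and bring it back with the helpers listed above. */
    static int example_hibern8_cycle(struct ufs_hba *hba)
    {
            int ret;

            ret = ufshcd_uic_hibern8_enter(hba);
            if (ret)
                    return ret;

            /* ... link stays in HIBERN8 while the device is idle ... */

            return ufshcd_uic_hibern8_exit(hba);
    }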
4236 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) in ufshcd_auto_hibern8_update() argument
4241 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_update()
4244 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4245 if (hba->ahit != ahit) { in ufshcd_auto_hibern8_update()
4246 hba->ahit = ahit; in ufshcd_auto_hibern8_update()
4249 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_update()
4252 !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) { in ufshcd_auto_hibern8_update()
4253 ufshcd_rpm_get_sync(hba); in ufshcd_auto_hibern8_update()
4254 ufshcd_hold(hba, false); in ufshcd_auto_hibern8_update()
4255 ufshcd_auto_hibern8_enable(hba); in ufshcd_auto_hibern8_update()
4256 ufshcd_release(hba); in ufshcd_auto_hibern8_update()
4257 ufshcd_rpm_put_sync(hba); in ufshcd_auto_hibern8_update()
4262 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) in ufshcd_auto_hibern8_enable() argument
4266 if (!ufshcd_is_auto_hibern8_supported(hba)) in ufshcd_auto_hibern8_enable()
4269 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
4270 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufshcd_auto_hibern8_enable()
4271 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_auto_hibern8_enable()
4279 static void ufshcd_init_pwr_info(struct ufs_hba *hba) in ufshcd_init_pwr_info() argument
4281 hba->pwr_info.gear_rx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4282 hba->pwr_info.gear_tx = UFS_PWM_G1; in ufshcd_init_pwr_info()
4283 hba->pwr_info.lane_rx = 1; in ufshcd_init_pwr_info()
4284 hba->pwr_info.lane_tx = 1; in ufshcd_init_pwr_info()
4285 hba->pwr_info.pwr_rx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4286 hba->pwr_info.pwr_tx = SLOWAUTO_MODE; in ufshcd_init_pwr_info()
4287 hba->pwr_info.hs_rate = 0; in ufshcd_init_pwr_info()
4294 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) in ufshcd_get_max_pwr_mode() argument
4296 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; in ufshcd_get_max_pwr_mode()
4298 if (hba->max_pwr_info.is_valid) in ufshcd_get_max_pwr_mode()
4306 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), in ufshcd_get_max_pwr_mode()
4308 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_get_max_pwr_mode()
4312 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", in ufshcd_get_max_pwr_mode()
4324 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); in ufshcd_get_max_pwr_mode()
4326 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4329 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4336 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), in ufshcd_get_max_pwr_mode()
4339 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), in ufshcd_get_max_pwr_mode()
4342 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", in ufshcd_get_max_pwr_mode()
4349 hba->max_pwr_info.is_valid = true; in ufshcd_get_max_pwr_mode()
4353 static int ufshcd_change_power_mode(struct ufs_hba *hba, in ufshcd_change_power_mode() argument
4359 if (!hba->force_pmc && in ufshcd_change_power_mode()
4360 pwr_mode->gear_rx == hba->pwr_info.gear_rx && in ufshcd_change_power_mode()
4361 pwr_mode->gear_tx == hba->pwr_info.gear_tx && in ufshcd_change_power_mode()
4362 pwr_mode->lane_rx == hba->pwr_info.lane_rx && in ufshcd_change_power_mode()
4363 pwr_mode->lane_tx == hba->pwr_info.lane_tx && in ufshcd_change_power_mode()
4364 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && in ufshcd_change_power_mode()
4365 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx && in ufshcd_change_power_mode()
4366 pwr_mode->hs_rate == hba->pwr_info.hs_rate) { in ufshcd_change_power_mode()
4367 dev_dbg(hba->dev, "%s: power already configured\n", __func__); in ufshcd_change_power_mode()
4377 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx); in ufshcd_change_power_mode()
4378 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufshcd_change_power_mode()
4382 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); in ufshcd_change_power_mode()
4384 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE); in ufshcd_change_power_mode()
4386 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx); in ufshcd_change_power_mode()
4387 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufshcd_change_power_mode()
4391 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); in ufshcd_change_power_mode()
4393 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE); in ufshcd_change_power_mode()
4399 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufshcd_change_power_mode()
4402 if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) { in ufshcd_change_power_mode()
4403 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), in ufshcd_change_power_mode()
4405 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), in ufshcd_change_power_mode()
4407 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), in ufshcd_change_power_mode()
4409 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), in ufshcd_change_power_mode()
4411 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), in ufshcd_change_power_mode()
4413 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), in ufshcd_change_power_mode()
4416 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), in ufshcd_change_power_mode()
4418 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), in ufshcd_change_power_mode()
4420 ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), in ufshcd_change_power_mode()
4424 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 in ufshcd_change_power_mode()
4428 dev_err(hba->dev, in ufshcd_change_power_mode()
4431 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, in ufshcd_change_power_mode()
4434 memcpy(&hba->pwr_info, pwr_mode, in ufshcd_change_power_mode()
4446 int ufshcd_config_pwr_mode(struct ufs_hba *hba, in ufshcd_config_pwr_mode() argument
4452 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, in ufshcd_config_pwr_mode()
4458 ret = ufshcd_change_power_mode(hba, &final_params); in ufshcd_config_pwr_mode()
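ufshcd_config_pwr_mode() gives vendor code a PRE_CHANGE pwr_change_notify hook to adjust the requested parameters before ufshcd_change_power_mode() programs the PA_* attributes and issues the UIC power mode change. A sketch of re-applying the negotiated maximum, assuming ufshcd_get_max_pwr_mode() has already populated hba->max_pwr_info as the probe path does:

    /* Illustrative sketch, not part of the driver: reprogram the highest
     * power mode discovered during link bring-up. */
    static int example_scale_to_max_gear(struct ufs_hba *hba)
    {
            if (!hba->max_pwr_info.is_valid)
                    return -EINVAL;

            return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
    }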
4470 static int ufshcd_complete_dev_init(struct ufs_hba *hba) in ufshcd_complete_dev_init() argument
4476 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_complete_dev_init()
4479 dev_err(hba->dev, in ufshcd_complete_dev_init()
4488 err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_complete_dev_init()
4496 dev_err(hba->dev, in ufshcd_complete_dev_init()
4500 dev_err(hba->dev, in ufshcd_complete_dev_init()
4521 int ufshcd_make_hba_operational(struct ufs_hba *hba) in ufshcd_make_hba_operational() argument
4526 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_make_hba_operational()
4527 trace_android_vh_ufs_mcq_make_hba_operational(hba, &err); in ufshcd_make_hba_operational()
4532 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); in ufshcd_make_hba_operational()
4535 if (ufshcd_is_intr_aggr_allowed(hba)) in ufshcd_make_hba_operational()
4536 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); in ufshcd_make_hba_operational()
4538 ufshcd_disable_intr_aggr(hba); in ufshcd_make_hba_operational()
4541 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4543 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_make_hba_operational()
4545 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4547 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_make_hba_operational()
4559 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); in ufshcd_make_hba_operational()
4561 ufshcd_enable_run_stop_reg(hba); in ufshcd_make_hba_operational()
4563 dev_err(hba->dev, in ufshcd_make_hba_operational()
4576 void ufshcd_hba_stop(struct ufs_hba *hba) in ufshcd_hba_stop() argument
4585 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_hba_stop()
4586 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); in ufshcd_hba_stop()
4587 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_hba_stop()
4589 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, in ufshcd_hba_stop()
4593 dev_err(hba->dev, "%s: Controller disable failed\n", __func__); in ufshcd_hba_stop()
4607 static int ufshcd_hba_execute_hce(struct ufs_hba *hba) in ufshcd_hba_execute_hce() argument
4613 if (!ufshcd_is_hba_active(hba)) in ufshcd_hba_execute_hce()
4615 ufshcd_hba_stop(hba); in ufshcd_hba_execute_hce()
4618 ufshcd_set_link_off(hba); in ufshcd_hba_execute_hce()
4620 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_execute_hce()
4623 ufshcd_hba_start(hba); in ufshcd_hba_execute_hce()
4635 ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100); in ufshcd_hba_execute_hce()
4639 while (ufshcd_is_hba_active(hba)) { in ufshcd_hba_execute_hce()
4643 dev_err(hba->dev, in ufshcd_hba_execute_hce()
4655 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_execute_hce()
4657 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_execute_hce()
4662 int ufshcd_hba_enable(struct ufs_hba *hba) in ufshcd_hba_enable() argument
4666 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { in ufshcd_hba_enable()
4667 ufshcd_set_link_off(hba); in ufshcd_hba_enable()
4668 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); in ufshcd_hba_enable()
4671 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); in ufshcd_hba_enable()
4672 ret = ufshcd_dme_reset(hba); in ufshcd_hba_enable()
4674 ret = ufshcd_dme_enable(hba); in ufshcd_hba_enable()
4676 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); in ufshcd_hba_enable()
4678 dev_err(hba->dev, in ufshcd_hba_enable()
4682 ret = ufshcd_hba_execute_hce(hba); in ufshcd_hba_enable()
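ufshcd_hba_enable() either performs the DME reset/enable sequence (for UFSHCI_QUIRK_BROKEN_HCE hosts) or drives the HCE bit through ufshcd_hba_execute_hce(); in both cases the controller must be re-enabled before link startup, which itself finishes by calling ufshcd_make_hba_operational(). A rough ordering sketch of that re-enable step, with the link-startup stage elided because it is a static helper:

    /* Illustrative sketch, not part of the driver: the controller
     * stop/enable ordering used by the reset paths. Link startup (which
     * ends in ufshcd_make_hba_operational()) would follow. */
    static int example_reenable_host(struct ufs_hba *hba)
    {
            ufshcd_hba_stop(hba);

            return ufshcd_hba_enable(hba);
    }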
4689 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) in ufshcd_disable_tx_lcc() argument
4694 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4697 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), in ufshcd_disable_tx_lcc()
4701 err = ufshcd_dme_set(hba, in ufshcd_disable_tx_lcc()
4706 err = ufshcd_dme_peer_set(hba, in ufshcd_disable_tx_lcc()
4711 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d", in ufshcd_disable_tx_lcc()
4720 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) in ufshcd_disable_device_tx_lcc() argument
4722 return ufshcd_disable_tx_lcc(hba, true); in ufshcd_disable_device_tx_lcc()
4725 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val) in ufshcd_update_evt_hist() argument
4732 e = &hba->ufs_stats.event[id]; in ufshcd_update_evt_hist()
4738 ufshcd_vops_event_notify(hba, id, &val); in ufshcd_update_evt_hist()
4748 static int ufshcd_link_startup(struct ufs_hba *hba) in ufshcd_link_startup() argument
4758 if (!ufshcd_is_ufs_dev_active(hba)) in ufshcd_link_startup()
4763 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); in ufshcd_link_startup()
4765 ret = ufshcd_dme_link_startup(hba); in ufshcd_link_startup()
4768 if (!ret && !ufshcd_is_device_present(hba)) { in ufshcd_link_startup()
4769 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4772 dev_err(hba->dev, "%s: Device not present\n", __func__); in ufshcd_link_startup()
4782 if (ret && ufshcd_hba_enable(hba)) { in ufshcd_link_startup()
4783 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4792 ufshcd_update_evt_hist(hba, in ufshcd_link_startup()
4805 ufshcd_init_pwr_info(hba); in ufshcd_link_startup()
4806 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4808 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) { in ufshcd_link_startup()
4809 ret = ufshcd_disable_device_tx_lcc(hba); in ufshcd_link_startup()
4815 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); in ufshcd_link_startup()
4820 ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_link_startup()
4821 ret = ufshcd_make_hba_operational(hba); in ufshcd_link_startup()
4824 dev_err(hba->dev, "link startup failed %d\n", ret); in ufshcd_link_startup()
4825 ufshcd_print_host_state(hba); in ufshcd_link_startup()
4826 ufshcd_print_pwr_info(hba); in ufshcd_link_startup()
4827 ufshcd_print_evt_hist(hba); in ufshcd_link_startup()
4842 static int ufshcd_verify_dev_init(struct ufs_hba *hba) in ufshcd_verify_dev_init() argument
4847 ufshcd_hold(hba, false); in ufshcd_verify_dev_init()
4848 mutex_lock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4850 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, in ufshcd_verify_dev_init()
4851 hba->nop_out_timeout); in ufshcd_verify_dev_init()
4856 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); in ufshcd_verify_dev_init()
4858 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_verify_dev_init()
4859 ufshcd_release(hba); in ufshcd_verify_dev_init()
4862 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); in ufshcd_verify_dev_init()
4879 struct ufs_hba *hba; in ufshcd_set_queue_depth() local
4881 hba = shost_priv(sdev->host); in ufshcd_set_queue_depth()
4883 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4884 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_set_queue_depth()
4895 lun_qdepth = hba->nutrs; in ufshcd_set_queue_depth()
4897 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); in ufshcd_set_queue_depth()
4899 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", in ufshcd_set_queue_depth()
4915 static int ufshcd_get_lu_wp(struct ufs_hba *hba, in ufshcd_get_lu_wp() argument
4928 else if (lun >= hba->dev_info.max_lu_supported) in ufshcd_get_lu_wp()
4931 ret = ufshcd_read_unit_desc_param(hba, in ufshcd_get_lu_wp()
4946 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba, in ufshcd_get_lu_power_on_wp_status() argument
4949 if (hba->dev_info.f_power_on_wp_en && in ufshcd_get_lu_power_on_wp_status()
4950 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_get_lu_power_on_wp_status()
4953 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun), in ufshcd_get_lu_power_on_wp_status()
4956 hba->dev_info.is_lu_power_on_wp = true; in ufshcd_get_lu_power_on_wp_status()
4965 static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_setup_links() argument
4973 if (hba->sdev_ufs_device) { in ufshcd_setup_links()
4975 &hba->sdev_ufs_device->sdev_gendev, in ufshcd_setup_links()
4979 dev_name(&hba->sdev_ufs_device->sdev_gendev)); in ufshcd_setup_links()
4982 hba->luns_avail--; in ufshcd_setup_links()
4984 if (hba->luns_avail == 1) { in ufshcd_setup_links()
4985 ufshcd_rpm_put(hba); in ufshcd_setup_links()
4993 hba->luns_avail--; in ufshcd_setup_links()
5005 struct ufs_hba *hba; in ufshcd_slave_alloc() local
5007 hba = shost_priv(sdev->host); in ufshcd_slave_alloc()
5026 ufshcd_get_lu_power_on_wp_status(hba, sdev); in ufshcd_slave_alloc()
5028 ufshcd_setup_links(hba, sdev); in ufshcd_slave_alloc()
5042 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_change_queue_depth() local
5044 if (depth > hba->nutrs) in ufshcd_change_queue_depth()
5045 depth = hba->nutrs; in ufshcd_change_queue_depth()
5049 static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_destroy() argument
5053 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_destroy()
5056 ufshpb_destroy_lu(hba, sdev); in ufshcd_hpb_destroy()
5059 static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) in ufshcd_hpb_configure() argument
5063 !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) in ufshcd_hpb_configure()
5066 ufshpb_init_hpb_lu(hba, sdev); in ufshcd_hpb_configure()
5075 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_slave_configure() local
5078 ufshcd_hpb_configure(hba, sdev); in ufshcd_slave_configure()
5081 if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) in ufshcd_slave_configure()
5089 else if (ufshcd_is_rpm_autosuspend_allowed(hba)) in ufshcd_slave_configure()
5098 ufshcd_crypto_setup_rq_keyslot_manager(hba, q); in ufshcd_slave_configure()
5111 struct ufs_hba *hba; in ufshcd_slave_destroy() local
5114 hba = shost_priv(sdev->host); in ufshcd_slave_destroy()
5116 ufshcd_hpb_destroy(hba, sdev); in ufshcd_slave_destroy()
5120 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5121 hba->sdev_ufs_device = NULL; in ufshcd_slave_destroy()
5122 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5123 } else if (hba->sdev_ufs_device) { in ufshcd_slave_destroy()
5127 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5128 if (hba->sdev_ufs_device) { in ufshcd_slave_destroy()
5129 supplier = &hba->sdev_ufs_device->sdev_gendev; in ufshcd_slave_destroy()
5132 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_slave_destroy()
5186 int ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) in ufshcd_transfer_rsp_status() argument
5195 if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { in ufshcd_transfer_rsp_status()
5204 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_transfer_rsp_status()
5232 if (!hba->pm_op_in_progress && in ufshcd_transfer_rsp_status()
5233 !ufshcd_eh_in_progress(hba) && in ufshcd_transfer_rsp_status()
5236 schedule_work(&hba->eeh_work); in ufshcd_transfer_rsp_status()
5239 ufshpb_rsp_upiu(hba, lrbp); in ufshcd_transfer_rsp_status()
5244 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5248 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5272 dev_err(hba->dev, in ufshcd_transfer_rsp_status()
5275 ufshcd_print_evt_hist(hba); in ufshcd_transfer_rsp_status()
5276 ufshcd_print_host_state(hba); in ufshcd_transfer_rsp_status()
5281 (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) in ufshcd_transfer_rsp_status()
5282 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); in ufshcd_transfer_rsp_status()
5287 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, in ufshcd_is_auto_hibern8_error() argument
5290 if (!ufshcd_is_auto_hibern8_supported(hba) || in ufshcd_is_auto_hibern8_error()
5291 !ufshcd_is_auto_hibern8_enabled(hba)) in ufshcd_is_auto_hibern8_error()
5297 if (hba->active_uic_cmd && in ufshcd_is_auto_hibern8_error()
5298 (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || in ufshcd_is_auto_hibern8_error()
5299 hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) in ufshcd_is_auto_hibern8_error()
5314 static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) in ufshcd_uic_cmd_compl() argument
5318 spin_lock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5319 if (ufshcd_is_auto_hibern8_error(hba, intr_status)) in ufshcd_uic_cmd_compl()
5320 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); in ufshcd_uic_cmd_compl()
5322 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { in ufshcd_uic_cmd_compl()
5323 hba->active_uic_cmd->argument2 |= in ufshcd_uic_cmd_compl()
5324 ufshcd_get_uic_cmd_result(hba); in ufshcd_uic_cmd_compl()
5325 hba->active_uic_cmd->argument3 = in ufshcd_uic_cmd_compl()
5326 ufshcd_get_dme_attr_val(hba); in ufshcd_uic_cmd_compl()
5327 if (!hba->uic_async_done) in ufshcd_uic_cmd_compl()
5328 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5329 complete(&hba->active_uic_cmd->done); in ufshcd_uic_cmd_compl()
5333 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { in ufshcd_uic_cmd_compl()
5334 hba->active_uic_cmd->cmd_active = 0; in ufshcd_uic_cmd_compl()
5335 complete(hba->uic_async_done); in ufshcd_uic_cmd_compl()
5340 ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, in ufshcd_uic_cmd_compl()
5342 spin_unlock(hba->host->host_lock); in ufshcd_uic_cmd_compl()
5352 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, in __ufshcd_transfer_req_compl() argument
5362 for_each_set_bit(index, &completed_reqs, hba->nutrs) { in __ufshcd_transfer_req_compl()
5363 lrbp = &hba->lrb[index]; in __ufshcd_transfer_req_compl()
5367 trace_android_vh_ufs_compl_command(hba, lrbp); in __ufshcd_transfer_req_compl()
5368 if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) in __ufshcd_transfer_req_compl()
5369 ufshcd_update_monitor(hba, lrbp); in __ufshcd_transfer_req_compl()
5370 ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); in __ufshcd_transfer_req_compl()
5372 ufshcd_transfer_rsp_status(hba, lrbp); in __ufshcd_transfer_req_compl()
5375 ufshcd_crypto_clear_prdt(hba, lrbp); in __ufshcd_transfer_req_compl()
5380 ufshcd_release(hba); in __ufshcd_transfer_req_compl()
5384 if (hba->dev_cmd.complete) { in __ufshcd_transfer_req_compl()
5385 trace_android_vh_ufs_compl_command(hba, lrbp); in __ufshcd_transfer_req_compl()
5386 ufshcd_add_command_trace(hba, index, in __ufshcd_transfer_req_compl()
5388 complete(hba->dev_cmd.complete); in __ufshcd_transfer_req_compl()
5393 ufshcd_clk_scaling_update_busy(hba); in __ufshcd_transfer_req_compl()
5406 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba, in ufshcd_transfer_req_compl() argument
5412 if (ufshcd_use_mcq_hooks(hba)) in ufshcd_transfer_req_compl()
5422 if (ufshcd_is_intr_aggr_allowed(hba) && in ufshcd_transfer_req_compl()
5423 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) in ufshcd_transfer_req_compl()
5424 ufshcd_reset_intr_aggr(hba); in ufshcd_transfer_req_compl()
5429 spin_lock_irqsave(&hba->outstanding_lock, flags); in ufshcd_transfer_req_compl()
5430 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_transfer_req_compl()
5431 completed_reqs = ~tr_doorbell & hba->outstanding_reqs; in ufshcd_transfer_req_compl()
5432 WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, in ufshcd_transfer_req_compl()
5434 hba->outstanding_reqs); in ufshcd_transfer_req_compl()
5435 hba->outstanding_reqs &= ~completed_reqs; in ufshcd_transfer_req_compl()
5436 spin_unlock_irqrestore(&hba->outstanding_lock, flags); in ufshcd_transfer_req_compl()
5439 __ufshcd_transfer_req_compl(hba, completed_reqs, in ufshcd_transfer_req_compl()
5447 int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask) in __ufshcd_write_ee_control() argument
5449 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in __ufshcd_write_ee_control()
5454 int ufshcd_write_ee_control(struct ufs_hba *hba) in ufshcd_write_ee_control() argument
5458 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5459 err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask); in ufshcd_write_ee_control()
5460 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_write_ee_control()
5462 dev_err(hba->dev, "%s: failed to write ee control %d\n", in ufshcd_write_ee_control()
5467 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask, in ufshcd_update_ee_control() argument
5473 mutex_lock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5476 if (ee_ctrl_mask != hba->ee_ctrl_mask) in ufshcd_update_ee_control()
5477 err = __ufshcd_write_ee_control(hba, ee_ctrl_mask); in ufshcd_update_ee_control()
5480 hba->ee_ctrl_mask = ee_ctrl_mask; in ufshcd_update_ee_control()
5483 mutex_unlock(&hba->ee_ctrl_mutex); in ufshcd_update_ee_control()
5497 static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_disable_ee() argument
5499 return ufshcd_update_ee_drv_mask(hba, 0, mask); in ufshcd_disable_ee()
5512 static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) in ufshcd_enable_ee() argument
5514 return ufshcd_update_ee_drv_mask(hba, mask, 0); in ufshcd_enable_ee()
5528 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba) in ufshcd_enable_auto_bkops() argument
5532 if (hba->auto_bkops_enabled) in ufshcd_enable_auto_bkops()
5535 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, in ufshcd_enable_auto_bkops()
5538 dev_err(hba->dev, "%s: failed to enable bkops %d\n", in ufshcd_enable_auto_bkops()
5543 hba->auto_bkops_enabled = true; in ufshcd_enable_auto_bkops()
5544 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled"); in ufshcd_enable_auto_bkops()
5547 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_enable_auto_bkops()
5549 dev_err(hba->dev, "%s: failed to disable exception event %d\n", in ufshcd_enable_auto_bkops()
5567 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) in ufshcd_disable_auto_bkops() argument
5571 if (!hba->auto_bkops_enabled) in ufshcd_disable_auto_bkops()
5578 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5580 dev_err(hba->dev, "%s: failed to enable exception event %d\n", in ufshcd_disable_auto_bkops()
5585 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG, in ufshcd_disable_auto_bkops()
5588 dev_err(hba->dev, "%s: failed to disable bkops %d\n", in ufshcd_disable_auto_bkops()
5590 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS); in ufshcd_disable_auto_bkops()
5594 hba->auto_bkops_enabled = false; in ufshcd_disable_auto_bkops()
5595 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); in ufshcd_disable_auto_bkops()
5596 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_disable_auto_bkops()
5610 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) in ufshcd_force_reset_auto_bkops() argument
5612 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) { in ufshcd_force_reset_auto_bkops()
5613 hba->auto_bkops_enabled = false; in ufshcd_force_reset_auto_bkops()
5614 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5615 ufshcd_enable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5617 hba->auto_bkops_enabled = true; in ufshcd_force_reset_auto_bkops()
5618 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; in ufshcd_force_reset_auto_bkops()
5619 ufshcd_disable_auto_bkops(hba); in ufshcd_force_reset_auto_bkops()
5621 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT; in ufshcd_force_reset_auto_bkops()
5622 hba->is_urgent_bkops_lvl_checked = false; in ufshcd_force_reset_auto_bkops()
5625 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_bkops_status() argument
5627 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_bkops_status()
5647 int ufshcd_bkops_ctrl(struct ufs_hba *hba, in ufshcd_bkops_ctrl() argument
5653 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_ctrl()
5655 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_ctrl()
5659 dev_err(hba->dev, "%s: invalid BKOPS status %d\n", in ufshcd_bkops_ctrl()
5666 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5668 err = ufshcd_disable_auto_bkops(hba); in ufshcd_bkops_ctrl()
5684 static int ufshcd_urgent_bkops(struct ufs_hba *hba) in ufshcd_urgent_bkops() argument
5686 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl); in ufshcd_urgent_bkops()
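ufshcd_bkops_ctrl() reads bBackgroundOpStatus and enables or disables auto-BKOPS depending on whether the current status has reached the requested threshold; ufshcd_urgent_bkops() simply applies hba->urgent_bkops_lvl. A minimal sketch using the performance-impact threshold, the same level ufshcd_force_reset_auto_bkops() programs above:

    /* Illustrative sketch, not part of the driver: let the device run
     * background ops once its status says performance would suffer. */
    static int example_allow_bkops(struct ufs_hba *hba)
    {
            return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
    }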
5689 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) in ufshcd_get_ee_status() argument
5691 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_get_ee_status()
5695 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba) in ufshcd_bkops_exception_event_handler() argument
5700 if (hba->is_urgent_bkops_lvl_checked) in ufshcd_bkops_exception_event_handler()
5703 err = ufshcd_get_bkops_status(hba, &curr_status); in ufshcd_bkops_exception_event_handler()
5705 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", in ufshcd_bkops_exception_event_handler()
5717 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n", in ufshcd_bkops_exception_event_handler()
5720 hba->urgent_bkops_lvl = curr_status; in ufshcd_bkops_exception_event_handler()
5721 hba->is_urgent_bkops_lvl_checked = true; in ufshcd_bkops_exception_event_handler()
5725 err = ufshcd_enable_auto_bkops(hba); in ufshcd_bkops_exception_event_handler()
5728 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", in ufshcd_bkops_exception_event_handler()
5732 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn) in __ufshcd_wb_toggle() argument
5738 index = ufshcd_wb_get_query_index(hba); in __ufshcd_wb_toggle()
5739 return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL); in __ufshcd_wb_toggle()
5742 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle() argument
5746 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_toggle()
5749 if (!(enable ^ hba->dev_info.wb_enabled)) in ufshcd_wb_toggle()
5752 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN); in ufshcd_wb_toggle()
5754 dev_err(hba->dev, "%s Write Booster %s failed %d\n", in ufshcd_wb_toggle()
5759 hba->dev_info.wb_enabled = enable; in ufshcd_wb_toggle()
5760 dev_dbg(hba->dev, "%s Write Booster %s\n", in ufshcd_wb_toggle()
5766 static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set) in ufshcd_wb_toggle_flush_during_h8() argument
5770 ret = __ufshcd_wb_toggle(hba, set, in ufshcd_wb_toggle_flush_during_h8()
5773 dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n", in ufshcd_wb_toggle_flush_during_h8()
5777 dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n", in ufshcd_wb_toggle_flush_during_h8()
5781 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable) in ufshcd_wb_toggle_flush() argument
5785 if (!ufshcd_is_wb_allowed(hba) || in ufshcd_wb_toggle_flush()
5786 hba->dev_info.wb_buf_flush_enabled == enable) in ufshcd_wb_toggle_flush()
5789 ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN); in ufshcd_wb_toggle_flush()
5791 dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__, in ufshcd_wb_toggle_flush()
5796 hba->dev_info.wb_buf_flush_enabled = enable; in ufshcd_wb_toggle_flush()
5798 dev_dbg(hba->dev, "%s WB-Buf Flush %s\n", in ufshcd_wb_toggle_flush()
5802 static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba, in ufshcd_wb_presrv_usrspc_keep_vcc_on() argument
5809 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5810 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5814 dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5820 dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n", in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5825 if (avail_buf < hba->vps->wb_flush_threshold) in ufshcd_wb_presrv_usrspc_keep_vcc_on()
5831 static bool ufshcd_wb_need_flush(struct ufs_hba *hba) in ufshcd_wb_need_flush() argument
5837 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_need_flush()
5850 index = ufshcd_wb_get_query_index(hba); in ufshcd_wb_need_flush()
5851 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_wb_need_flush()
5855 dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n", in ufshcd_wb_need_flush()
5860 if (!hba->dev_info.b_presrv_uspc_en) { in ufshcd_wb_need_flush()
5866 return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf); in ufshcd_wb_need_flush()
5871 struct ufs_hba *hba = container_of(to_delayed_work(work), in ufshcd_rpm_dev_flush_recheck_work() local
5880 ufshcd_rpm_get_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
5881 ufshcd_rpm_put_sync(hba); in ufshcd_rpm_dev_flush_recheck_work()
5893 struct ufs_hba *hba; in ufshcd_exception_event_handler() local
5896 hba = container_of(work, struct ufs_hba, eeh_work); in ufshcd_exception_event_handler()
5898 ufshcd_scsi_block_requests(hba); in ufshcd_exception_event_handler()
5899 err = ufshcd_get_ee_status(hba, &status); in ufshcd_exception_event_handler()
5901 dev_err(hba->dev, "%s: failed to get exception status %d\n", in ufshcd_exception_event_handler()
5906 trace_ufshcd_exception_event(dev_name(hba->dev), status); in ufshcd_exception_event_handler()
5908 if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS) in ufshcd_exception_event_handler()
5909 ufshcd_bkops_exception_event_handler(hba); in ufshcd_exception_event_handler()
5911 ufs_debugfs_exception_event(hba, status); in ufshcd_exception_event_handler()
5913 ufshcd_scsi_unblock_requests(hba); in ufshcd_exception_event_handler()
5918 static void ufshcd_complete_requests(struct ufs_hba *hba) in ufshcd_complete_requests() argument
5920 ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); in ufshcd_complete_requests()
5921 ufshcd_tmc_handler(hba); in ufshcd_complete_requests()
5924 static void ufshcd_retry_aborted_requests(struct ufs_hba *hba) in ufshcd_retry_aborted_requests() argument
5926 ufshcd_transfer_req_compl(hba, /*retry_requests=*/true); in ufshcd_retry_aborted_requests()
5927 ufshcd_tmc_handler(hba); in ufshcd_retry_aborted_requests()
5937 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba) in ufshcd_quirk_dl_nac_errors() argument
5942 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5947 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR)) in ufshcd_quirk_dl_nac_errors()
5950 if ((hba->saved_err & DEVICE_FATAL_ERROR) || in ufshcd_quirk_dl_nac_errors()
5951 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5952 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) in ufshcd_quirk_dl_nac_errors()
5955 if ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5956 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) { in ufshcd_quirk_dl_nac_errors()
5961 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5963 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5969 if ((hba->saved_err & INT_FATAL_ERRORS) || in ufshcd_quirk_dl_nac_errors()
5970 ((hba->saved_err & UIC_ERROR) && in ufshcd_quirk_dl_nac_errors()
5971 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) in ufshcd_quirk_dl_nac_errors()
5981 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5982 err = ufshcd_verify_dev_init(hba); in ufshcd_quirk_dl_nac_errors()
5983 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
5989 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR) in ufshcd_quirk_dl_nac_errors()
5990 hba->saved_err &= ~UIC_ERROR; in ufshcd_quirk_dl_nac_errors()
5992 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; in ufshcd_quirk_dl_nac_errors()
5993 if (!hba->saved_uic_err) in ufshcd_quirk_dl_nac_errors()
5997 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_quirk_dl_nac_errors()
6002 static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) in ufshcd_is_saved_err_fatal() argument
6004 return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || in ufshcd_is_saved_err_fatal()
6005 (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); in ufshcd_is_saved_err_fatal()
6009 static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) in ufshcd_schedule_eh_work() argument
6012 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { in ufshcd_schedule_eh_work()
6013 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_schedule_eh_work()
6014 ufshcd_is_saved_err_fatal(hba)) in ufshcd_schedule_eh_work()
6015 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; in ufshcd_schedule_eh_work()
6017 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; in ufshcd_schedule_eh_work()
6018 queue_work(hba->eh_wq, &hba->eh_work); in ufshcd_schedule_eh_work()
6022 static void ufshcd_force_error_recovery(struct ufs_hba *hba) in ufshcd_force_error_recovery() argument
6024 spin_lock_irq(hba->host->host_lock); in ufshcd_force_error_recovery()
6025 hba->force_reset = true; in ufshcd_force_error_recovery()
6026 ufshcd_schedule_eh_work(hba); in ufshcd_force_error_recovery()
6027 spin_unlock_irq(hba->host->host_lock); in ufshcd_force_error_recovery()
6030 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) in ufshcd_clk_scaling_allow() argument
6033 down_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6034 hba->clk_scaling.is_allowed = allow; in ufshcd_clk_scaling_allow()
6035 up_write(&hba->clk_scaling_lock); in ufshcd_clk_scaling_allow()
6039 static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) in ufshcd_clk_scaling_suspend() argument
6042 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6043 ufshcd_suspend_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6044 ufshcd_clk_scaling_allow(hba, false); in ufshcd_clk_scaling_suspend()
6046 ufshcd_clk_scaling_allow(hba, true); in ufshcd_clk_scaling_suspend()
6047 if (hba->clk_scaling.is_enabled) in ufshcd_clk_scaling_suspend()
6048 ufshcd_resume_clkscaling(hba); in ufshcd_clk_scaling_suspend()
6052 static void ufshcd_err_handling_prepare(struct ufs_hba *hba) in ufshcd_err_handling_prepare() argument
6054 ufshcd_rpm_get_sync(hba); in ufshcd_err_handling_prepare()
6055 if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) || in ufshcd_err_handling_prepare()
6056 hba->is_sys_suspended) { in ufshcd_err_handling_prepare()
6064 ufshcd_setup_hba_vreg(hba, true); in ufshcd_err_handling_prepare()
6065 ufshcd_enable_irq(hba); in ufshcd_err_handling_prepare()
6066 ufshcd_setup_vreg(hba, true); in ufshcd_err_handling_prepare()
6067 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_err_handling_prepare()
6068 ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_err_handling_prepare()
6069 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
6070 if (!ufshcd_is_clkgating_allowed(hba)) in ufshcd_err_handling_prepare()
6071 ufshcd_setup_clocks(hba, true); in ufshcd_err_handling_prepare()
6072 ufshcd_release(hba); in ufshcd_err_handling_prepare()
6073 pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; in ufshcd_err_handling_prepare()
6074 ufshcd_vops_resume(hba, pm_op); in ufshcd_err_handling_prepare()
6076 ufshcd_hold(hba, false); in ufshcd_err_handling_prepare()
6077 if (ufshcd_is_clkscaling_supported(hba) && in ufshcd_err_handling_prepare()
6078 hba->clk_scaling.is_enabled) in ufshcd_err_handling_prepare()
6079 ufshcd_suspend_clkscaling(hba); in ufshcd_err_handling_prepare()
6080 ufshcd_clk_scaling_allow(hba, false); in ufshcd_err_handling_prepare()
6082 ufshcd_scsi_block_requests(hba); in ufshcd_err_handling_prepare()
6085 cancel_work_sync(&hba->eeh_work); in ufshcd_err_handling_prepare()
6088 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) in ufshcd_err_handling_unprepare() argument
6090 ufshcd_scsi_unblock_requests(hba); in ufshcd_err_handling_unprepare()
6091 ufshcd_release(hba); in ufshcd_err_handling_unprepare()
6092 if (ufshcd_is_clkscaling_supported(hba)) in ufshcd_err_handling_unprepare()
6093 ufshcd_clk_scaling_suspend(hba, false); in ufshcd_err_handling_unprepare()
6094 ufshcd_rpm_put(hba); in ufshcd_err_handling_unprepare()
6097 static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) in ufshcd_err_handling_should_stop() argument
6099 return (!hba->is_powered || hba->shutting_down || in ufshcd_err_handling_should_stop()
6100 !hba->sdev_ufs_device || in ufshcd_err_handling_should_stop()
6101 hba->ufshcd_state == UFSHCD_STATE_ERROR || in ufshcd_err_handling_should_stop()
6102 (!(hba->saved_err || hba->saved_uic_err || hba->force_reset || in ufshcd_err_handling_should_stop()
6103 ufshcd_is_link_broken(hba)))); in ufshcd_err_handling_should_stop()
6107 static void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6109 struct Scsi_Host *shost = hba->host; in ufshcd_recover_pm_error()
6114 hba->is_sys_suspended = false; in ufshcd_recover_pm_error()
6119 ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev); in ufshcd_recover_pm_error()
6123 ret = pm_runtime_set_active(hba->dev); in ufshcd_recover_pm_error()
6140 static inline void ufshcd_recover_pm_error(struct ufs_hba *hba) in ufshcd_recover_pm_error() argument
6145 static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) in ufshcd_is_pwr_mode_restore_needed() argument
6147 struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info; in ufshcd_is_pwr_mode_restore_needed()
6150 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); in ufshcd_is_pwr_mode_restore_needed()
6167 struct ufs_hba *hba; in ufshcd_err_handler() local
6177 hba = container_of(work, struct ufs_hba, eh_work); in ufshcd_err_handler()
6179 down(&hba->host_sem); in ufshcd_err_handler()
6180 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6181 if (ufshcd_err_handling_should_stop(hba)) { in ufshcd_err_handler()
6182 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6183 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6184 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6185 up(&hba->host_sem); in ufshcd_err_handler()
6188 ufshcd_set_eh_in_progress(hba); in ufshcd_err_handler()
6189 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6190 ufshcd_err_handling_prepare(hba); in ufshcd_err_handler()
6192 ufshcd_complete_requests(hba); in ufshcd_err_handler()
6193 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6194 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) in ufshcd_err_handler()
6195 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_err_handler()
6200 if (ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6203 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { in ufshcd_err_handler()
6206 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6208 ret = ufshcd_quirk_dl_nac_errors(hba); in ufshcd_err_handler()
6209 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6210 if (!ret && ufshcd_err_handling_should_stop(hba)) in ufshcd_err_handler()
6214 if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) || in ufshcd_err_handler()
6215 (hba->saved_uic_err && in ufshcd_err_handler()
6216 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_err_handler()
6217 bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR); in ufshcd_err_handler()
6219 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6220 ufshcd_print_host_state(hba); in ufshcd_err_handler()
6221 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6222 ufshcd_print_evt_hist(hba); in ufshcd_err_handler()
6223 ufshcd_print_tmrs(hba, hba->outstanding_tasks); in ufshcd_err_handler()
6225 if (ufshcd_use_mcq_hooks(hba)) in ufshcd_err_handler()
6226 trace_android_vh_ufs_mcq_print_trs(hba, pr_prdt); in ufshcd_err_handler()
6228 ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt); in ufshcd_err_handler()
6229 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6237 if (hba->force_reset || ufshcd_is_link_broken(hba) || in ufshcd_err_handler()
6238 ufshcd_is_saved_err_fatal(hba) || in ufshcd_err_handler()
6239 ((hba->saved_err & UIC_ERROR) && in ufshcd_err_handler()
6240 (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR | in ufshcd_err_handler()
6250 if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) { in ufshcd_err_handler()
6251 hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_err_handler()
6252 if (!hba->saved_uic_err) in ufshcd_err_handler()
6253 hba->saved_err &= ~UIC_ERROR; in ufshcd_err_handler()
6254 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6255 if (ufshcd_is_pwr_mode_restore_needed(hba)) in ufshcd_err_handler()
6257 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6258 if (!hba->saved_err && !needs_restore) in ufshcd_err_handler()
6262 hba->silence_err_logs = true; in ufshcd_err_handler()
6264 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6265 outstanding_reqs = &hba->outstanding_reqs; in ufshcd_err_handler()
6266 nr_tag = hba->nutrs; in ufshcd_err_handler()
6267 trace_android_vh_ufs_mcq_get_outstanding_reqs(hba, in ufshcd_err_handler()
6271 if (ufshcd_try_to_abort_task(hba, tag)) { in ufshcd_err_handler()
6278 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { in ufshcd_err_handler()
6279 if (ufshcd_clear_tm_cmd(hba, tag)) { in ufshcd_err_handler()
6286 ufshcd_retry_aborted_requests(hba); in ufshcd_err_handler()
6288 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6289 hba->silence_err_logs = false; in ufshcd_err_handler()
6300 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6305 down_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6306 hba->force_pmc = true; in ufshcd_err_handler()
6307 pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); in ufshcd_err_handler()
6310 dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n", in ufshcd_err_handler()
6313 hba->force_pmc = false; in ufshcd_err_handler()
6314 ufshcd_print_pwr_info(hba); in ufshcd_err_handler()
6315 up_write(&hba->clk_scaling_lock); in ufshcd_err_handler()
6316 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6322 hba->force_reset = false; in ufshcd_err_handler()
6323 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6324 trace_android_vh_ufs_mcq_retry_complete(hba); in ufshcd_err_handler()
6325 err = ufshcd_reset_and_restore(hba); in ufshcd_err_handler()
6327 dev_err(hba->dev, "%s: reset and restore failed with err %d\n", in ufshcd_err_handler()
6330 ufshcd_recover_pm_error(hba); in ufshcd_err_handler()
6331 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_err_handler()
6336 if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_err_handler()
6337 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_err_handler()
6338 if (hba->saved_err || hba->saved_uic_err) in ufshcd_err_handler()
6339 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x", in ufshcd_err_handler()
6340 __func__, hba->saved_err, hba->saved_uic_err); in ufshcd_err_handler()
6342 ufshcd_clear_eh_in_progress(hba); in ufshcd_err_handler()
6343 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_err_handler()
6344 ufshcd_err_handling_unprepare(hba); in ufshcd_err_handler()
6345 up(&hba->host_sem); in ufshcd_err_handler()
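The fragments above show ufshcd_err_handler() only in pieces. A minimal, self-contained C sketch of the shape they imply — bail out early when there is nothing to recover, otherwise clear the saved errors or fall back to a full reset — is given here; every demo_* name is a hypothetical stand-in for illustration, not a symbol from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified host state used only for this sketch. */
struct demo_host {
	bool powered, shutting_down, force_reset, link_broken;
	unsigned int saved_err, saved_uic_err;
};

/* Mirrors the shape of ufshcd_err_handling_should_stop() in the listing. */
static bool demo_should_stop(const struct demo_host *h)
{
	return !h->powered || h->shutting_down ||
	       !(h->saved_err || h->saved_uic_err || h->force_reset ||
	         h->link_broken);
}

static void demo_err_handler(struct demo_host *h)
{
	if (demo_should_stop(h))
		return;			/* nothing to recover, leave state alone */
	/* prepare: block new I/O, resume the device, hold clocks ... */
	if (h->force_reset || h->link_broken || h->saved_err) {
		puts("full host reset and restore");
		h->saved_err = h->saved_uic_err = 0;
		h->force_reset = h->link_broken = false;
	}
	/* unprepare: unblock I/O, drop the runtime PM reference ... */
}

int main(void)
{
	struct demo_host h = { .powered = true, .force_reset = true };
	demo_err_handler(&h);
	return 0;
}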
6356 static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) in ufshcd_update_uic_error() argument
6362 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); in ufshcd_update_uic_error()
6365 ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg); in ufshcd_update_uic_error()
6371 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", in ufshcd_update_uic_error()
6378 hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6379 if (hba->uic_async_done && hba->active_uic_cmd) in ufshcd_update_uic_error()
6380 cmd = hba->active_uic_cmd; in ufshcd_update_uic_error()
6386 hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR; in ufshcd_update_uic_error()
6392 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); in ufshcd_update_uic_error()
6395 ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg); in ufshcd_update_uic_error()
6398 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; in ufshcd_update_uic_error()
6399 else if (hba->dev_quirks & in ufshcd_update_uic_error()
6402 hba->uic_error |= in ufshcd_update_uic_error()
6405 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; in ufshcd_update_uic_error()
6411 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); in ufshcd_update_uic_error()
6414 ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg); in ufshcd_update_uic_error()
6415 hba->uic_error |= UFSHCD_UIC_NL_ERROR; in ufshcd_update_uic_error()
6419 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); in ufshcd_update_uic_error()
6422 ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg); in ufshcd_update_uic_error()
6423 hba->uic_error |= UFSHCD_UIC_TL_ERROR; in ufshcd_update_uic_error()
6427 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); in ufshcd_update_uic_error()
6430 ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg); in ufshcd_update_uic_error()
6431 hba->uic_error |= UFSHCD_UIC_DME_ERROR; in ufshcd_update_uic_error()
6435 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", in ufshcd_update_uic_error()
6436 __func__, hba->uic_error); in ufshcd_update_uic_error()
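The matched lines above show ufshcd_update_uic_error() reading one error register per UniPro layer and OR-ing a flag into hba->uic_error. A stand-alone C model of that accumulate-a-summary-bitmask pattern, with made-up register indices and flag values, might look like this.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag values for this sketch only. */
enum { DEMO_PA_ERR = 1 << 0, DEMO_DL_ERR = 1 << 1, DEMO_NL_ERR = 1 << 2 };

static uint32_t demo_readl(const uint32_t *regs, int idx)
{
	return regs[idx];		/* stands in for ufshcd_readl() */
}

/* Walk the per-layer error registers and accumulate a summary bitmask. */
static uint32_t demo_update_uic_error(const uint32_t *regs)
{
	uint32_t uic_error = 0;

	if (demo_readl(regs, 0) & 0x1f)	/* PHY adapter layer */
		uic_error |= DEMO_PA_ERR;
	if (demo_readl(regs, 1))	/* data link layer */
		uic_error |= DEMO_DL_ERR;
	if (demo_readl(regs, 2))	/* network layer */
		uic_error |= DEMO_NL_ERR;
	return uic_error;
}

int main(void)
{
	uint32_t regs[3] = { 0x3, 0x0, 0x1 };
	printf("uic_error = 0x%x\n", demo_update_uic_error(regs));
	return 0;
}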
6449 static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) in ufshcd_check_errors() argument
6454 spin_lock(hba->host->host_lock); in ufshcd_check_errors()
6455 hba->errors |= UFSHCD_ERROR_MASK & intr_status; in ufshcd_check_errors()
6457 if (hba->errors & INT_FATAL_ERRORS) { in ufshcd_check_errors()
6458 ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, in ufshcd_check_errors()
6459 hba->errors); in ufshcd_check_errors()
6463 if (hba->errors & UIC_ERROR) { in ufshcd_check_errors()
6464 hba->uic_error = 0; in ufshcd_check_errors()
6465 retval = ufshcd_update_uic_error(hba); in ufshcd_check_errors()
6466 if (hba->uic_error) in ufshcd_check_errors()
6470 if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) { in ufshcd_check_errors()
6471 dev_err(hba->dev, in ufshcd_check_errors()
6473 __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? in ufshcd_check_errors()
6475 hba->errors, ufshcd_get_upmcrs(hba)); in ufshcd_check_errors()
6476 ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR, in ufshcd_check_errors()
6477 hba->errors); in ufshcd_check_errors()
6478 ufshcd_set_link_broken(hba); in ufshcd_check_errors()
6482 trace_android_vh_ufs_check_int_errors(hba, queue_eh_work); in ufshcd_check_errors()
6489 hba->saved_err |= hba->errors; in ufshcd_check_errors()
6490 hba->saved_uic_err |= hba->uic_error; in ufshcd_check_errors()
6493 if ((hba->saved_err & in ufshcd_check_errors()
6495 (hba->saved_uic_err && in ufshcd_check_errors()
6496 (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) { in ufshcd_check_errors()
6497 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n", in ufshcd_check_errors()
6498 __func__, hba->saved_err, in ufshcd_check_errors()
6499 hba->saved_uic_err); in ufshcd_check_errors()
6500 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, in ufshcd_check_errors()
6502 ufshcd_print_pwr_info(hba); in ufshcd_check_errors()
6504 ufshcd_schedule_eh_work(hba); in ufshcd_check_errors()
6513 hba->errors = 0; in ufshcd_check_errors()
6514 hba->uic_error = 0; in ufshcd_check_errors()
6515 spin_unlock(hba->host->host_lock); in ufshcd_check_errors()
6527 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) in ufshcd_tmc_handler() argument
6533 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_tmc_handler()
6534 pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); in ufshcd_tmc_handler()
6535 issued = hba->outstanding_tasks & ~pending; in ufshcd_tmc_handler()
6536 for_each_set_bit(tag, &issued, hba->nutmrs) { in ufshcd_tmc_handler()
6537 struct request *req = hba->tmf_rqs[tag]; in ufshcd_tmc_handler()
6543 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_tmc_handler()
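The core of ufshcd_tmc_handler() visible above is one bit operation: slots that were issued but are no longer pending in the task-management doorbell have completed. A tiny self-contained C example of that computation, on plain bitmasks rather than driver state:

#include <stdio.h>

/* issued = outstanding & ~pending, as in the matched lines. */
static unsigned long demo_completed_tmf(unsigned long outstanding,
					unsigned long doorbell)
{
	return outstanding & ~doorbell;
}

int main(void)
{
	/* tags 0 and 2 were issued; tag 2 is still pending in hardware */
	unsigned long done = demo_completed_tmf(0x5UL, 0x4UL);

	printf("completed task-management tags mask: 0x%lx\n", done); /* 0x1 */
	return 0;
}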
6557 static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) in ufshcd_sl_intr() argument
6562 retval |= ufshcd_uic_cmd_compl(hba, intr_status); in ufshcd_sl_intr()
6564 if (intr_status & UFSHCD_ERROR_MASK || hba->errors) in ufshcd_sl_intr()
6565 retval |= ufshcd_check_errors(hba, intr_status); in ufshcd_sl_intr()
6568 retval |= ufshcd_tmc_handler(hba); in ufshcd_sl_intr()
6571 retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); in ufshcd_sl_intr()
6573 trace_android_vh_ufs_mcq_handler(hba, intr_status, &retval); in ufshcd_sl_intr()
6591 struct ufs_hba *hba = __hba; in ufshcd_intr() local
6592 int retries = hba->nutrs; in ufshcd_intr()
6595 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6596 hba->ufs_stats.last_intr_status = intr_status; in ufshcd_intr()
6597 hba->ufs_stats.last_intr_ts = ktime_get(); in ufshcd_intr()
6607 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); in ufshcd_intr()
6608 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); in ufshcd_intr()
6610 retval |= ufshcd_sl_intr(hba, enabled_intr_status); in ufshcd_intr()
6612 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); in ufshcd_intr()
6615 has_outstanding = hba->outstanding_reqs != 0; in ufshcd_intr()
6616 trace_android_vh_ufs_mcq_has_oustanding_reqs(hba, &has_outstanding); in ufshcd_intr()
6620 has_outstanding) && !ufshcd_eh_in_progress(hba)) { in ufshcd_intr()
6621 dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", in ufshcd_intr()
6624 hba->ufs_stats.last_intr_status, in ufshcd_intr()
6626 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); in ufshcd_intr()
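The ufshcd_intr() fragments above follow a common interrupt-handler loop: read the status register, mask it with the enable register, acknowledge what was seen, dispatch, and re-read in case more work arrived. A simplified, compilable C model of that loop, using a two-element array as a hypothetical stand-in for the status/enable registers:

#include <stdint.h>
#include <stdio.h>

/* regs[0] = status (write-1-to-clear in this model), regs[1] = enable mask. */
static uint32_t demo_read(uint32_t *regs, int i)		{ return regs[i]; }
static void demo_write(uint32_t *regs, int i, uint32_t v)	{ regs[i] &= ~v; }

static int demo_intr(uint32_t *regs, int retries)
{
	int handled = 0;
	uint32_t status = demo_read(regs, 0);

	while (status && retries--) {
		uint32_t enabled = status & demo_read(regs, 1);

		demo_write(regs, 0, status);	/* ack what we saw before handling it */
		if (enabled)
			handled = 1;		/* dispatch would go here */
		status = demo_read(regs, 0);	/* pick up anything new */
	}
	return handled;
}

int main(void)
{
	uint32_t regs[2] = { 0x30, 0xff };

	printf("handled=%d, status now 0x%x\n", demo_intr(regs, 4), regs[0]);
	return 0;
}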
6632 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) in ufshcd_clear_tm_cmd() argument
6638 if (!test_bit(tag, &hba->outstanding_tasks)) in ufshcd_clear_tm_cmd()
6641 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6642 ufshcd_utmrl_clear(hba, tag); in ufshcd_clear_tm_cmd()
6643 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_clear_tm_cmd()
6646 err = ufshcd_wait_for_register(hba, in ufshcd_clear_tm_cmd()
6653 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, in __ufshcd_issue_tm_cmd() argument
6656 struct request_queue *q = hba->tmf_queue; in __ufshcd_issue_tm_cmd()
6657 struct Scsi_Host *host = hba->host; in __ufshcd_issue_tm_cmd()
6671 ufshcd_hold(hba, false); in __ufshcd_issue_tm_cmd()
6676 hba->tmf_rqs[req->tag] = req; in __ufshcd_issue_tm_cmd()
6679 memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6680 ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function); in __ufshcd_issue_tm_cmd()
6683 __set_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6685 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); in __ufshcd_issue_tm_cmd()
6691 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND); in __ufshcd_issue_tm_cmd()
6697 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR); in __ufshcd_issue_tm_cmd()
6698 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", in __ufshcd_issue_tm_cmd()
6700 if (ufshcd_clear_tm_cmd(hba, task_tag)) in __ufshcd_issue_tm_cmd()
6701 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", in __ufshcd_issue_tm_cmd()
6706 memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq)); in __ufshcd_issue_tm_cmd()
6708 ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP); in __ufshcd_issue_tm_cmd()
6711 spin_lock_irqsave(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6712 hba->tmf_rqs[req->tag] = NULL; in __ufshcd_issue_tm_cmd()
6713 __clear_bit(task_tag, &hba->outstanding_tasks); in __ufshcd_issue_tm_cmd()
6714 spin_unlock_irqrestore(hba->host->host_lock, flags); in __ufshcd_issue_tm_cmd()
6716 ufshcd_release(hba); in __ufshcd_issue_tm_cmd()
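Around a task-management request, __ufshcd_issue_tm_cmd() keeps per-slot bookkeeping: mark the tag outstanding, ring the doorbell, wait for completion, and always clear the tag again whether the request finished or timed out. A rough, self-contained C sketch of only that bookkeeping (no real hardware or locking), with invented demo_* names:

#include <stdbool.h>
#include <stdio.h>

struct demo_tm {
	unsigned long outstanding_tasks;
	unsigned long doorbell;
};

static bool demo_wait_for_completion(struct demo_tm *tm, int tag)
{
	tm->doorbell &= ~(1UL << tag);	/* pretend the controller finished */
	return true;			/* false would model a timeout */
}

static int demo_issue_tm(struct demo_tm *tm, int tag)
{
	int err = 0;

	tm->outstanding_tasks |= 1UL << tag;
	tm->doorbell |= 1UL << tag;	/* ring the doorbell for this slot */
	if (!demo_wait_for_completion(tm, tag))
		err = -1;		/* timed out; caller may try to clear the slot */
	tm->outstanding_tasks &= ~(1UL << tag);
	return err;
}

int main(void)
{
	struct demo_tm tm = { 0, 0 };

	printf("issue tag 3 -> %d\n", demo_issue_tm(&tm, 3));
	return 0;
}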
6732 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, in ufshcd_issue_tm_cmd() argument
6754 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function); in ufshcd_issue_tm_cmd()
6760 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", in ufshcd_issue_tm_cmd()
6785 static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, in ufshcd_issue_devman_upiu_cmd() argument
6793 const u32 tag = hba->reserved_slot; in ufshcd_issue_devman_upiu_cmd()
6799 lockdep_assert_held(&hba->dev_cmd.lock); in ufshcd_issue_devman_upiu_cmd()
6801 down_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6803 lrbp = &hba->lrb[tag]; in ufshcd_issue_devman_upiu_cmd()
6812 trace_android_vh_ufs_mcq_set_sqid(hba, 0, lrbp); in ufshcd_issue_devman_upiu_cmd()
6815 hba->dev_cmd.type = cmd_type; in ufshcd_issue_devman_upiu_cmd()
6817 if (hba->ufs_version <= ufshci_version(1, 1)) in ufshcd_issue_devman_upiu_cmd()
6840 hba->dev_cmd.complete = &wait; in ufshcd_issue_devman_upiu_cmd()
6842 ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); in ufshcd_issue_devman_upiu_cmd()
6844 ufshcd_send_command(hba, tag); in ufshcd_issue_devman_upiu_cmd()
6850 ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT); in ufshcd_issue_devman_upiu_cmd()
6863 dev_warn(hba->dev, in ufshcd_issue_devman_upiu_cmd()
6870 ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, in ufshcd_issue_devman_upiu_cmd()
6873 up_read(&hba->clk_scaling_lock); in ufshcd_issue_devman_upiu_cmd()
6892 int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, in ufshcd_exec_raw_upiu_cmd() argument
6910 ufshcd_hold(hba, false); in ufshcd_exec_raw_upiu_cmd()
6911 mutex_lock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6912 err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, in ufshcd_exec_raw_upiu_cmd()
6915 mutex_unlock(&hba->dev_cmd.lock); in ufshcd_exec_raw_upiu_cmd()
6916 ufshcd_release(hba); in ufshcd_exec_raw_upiu_cmd()
6925 err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f); in ufshcd_exec_raw_upiu_cmd()
6931 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__, in ufshcd_exec_raw_upiu_cmd()
6958 struct ufs_hba *hba; in ufshcd_eh_device_reset_handler() local
6964 hba = shost_priv(host); in ufshcd_eh_device_reset_handler()
6967 err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp); in ufshcd_eh_device_reset_handler()
6974 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_eh_device_reset_handler()
6975 trace_android_vh_ufs_mcq_clear_pending(hba, &err); in ufshcd_eh_device_reset_handler()
6978 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { in ufshcd_eh_device_reset_handler()
6979 if (hba->lrb[pos].lun == lun) { in ufshcd_eh_device_reset_handler()
6980 err = ufshcd_clear_cmd(hba, pos); in ufshcd_eh_device_reset_handler()
6983 __ufshcd_transfer_req_compl(hba, 1U << pos, false); in ufshcd_eh_device_reset_handler()
6989 hba->req_abort_count = 0; in ufshcd_eh_device_reset_handler()
6990 ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err); in ufshcd_eh_device_reset_handler()
6994 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_eh_device_reset_handler()
7000 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) in ufshcd_set_req_abort_skip() argument
7005 for_each_set_bit(tag, &bitmap, hba->nutrs) { in ufshcd_set_req_abort_skip()
7006 lrbp = &hba->lrb[tag]; in ufshcd_set_req_abort_skip()
7024 static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) in ufshcd_try_to_abort_task() argument
7026 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_try_to_abort_task()
7034 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7038 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n", in ufshcd_try_to_abort_task()
7046 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", in ufshcd_try_to_abort_task()
7049 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_try_to_abort_task()
7050 trace_android_vh_ufs_mcq_get_outstanding_reqs(hba, in ufshcd_try_to_abort_task()
7058 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_try_to_abort_task()
7067 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", in ufshcd_try_to_abort_task()
7071 dev_err(hba->dev, in ufshcd_try_to_abort_task()
7085 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, in ufshcd_try_to_abort_task()
7090 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n", in ufshcd_try_to_abort_task()
7096 err = ufshcd_clear_cmd(hba, tag); in ufshcd_try_to_abort_task()
7098 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", in ufshcd_try_to_abort_task()
7114 struct ufs_hba *hba = shost_priv(host); in ufshcd_abort() local
7116 struct ufshcd_lrb *lrbp = &hba->lrb[tag]; in ufshcd_abort()
7121 trace_android_vh_ufs_mcq_map_tag(hba, in ufshcd_abort()
7126 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_abort()
7131 ufshcd_hold(hba, false); in ufshcd_abort()
7132 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); in ufshcd_abort()
7134 if (!(test_bit(tag, &hba->outstanding_reqs))) { in ufshcd_abort()
7135 dev_err(hba->dev, in ufshcd_abort()
7137 __func__, tag, hba->outstanding_reqs, reg); in ufshcd_abort()
7142 dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag); in ufshcd_abort()
7152 if (!hba->req_abort_count) { in ufshcd_abort()
7153 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag); in ufshcd_abort()
7154 ufshcd_print_evt_hist(hba); in ufshcd_abort()
7155 ufshcd_print_host_state(hba); in ufshcd_abort()
7156 ufshcd_print_pwr_info(hba); in ufshcd_abort()
7157 ufshcd_print_trs(hba, 1 << tag, true); in ufshcd_abort()
7159 ufshcd_print_trs(hba, 1 << tag, false); in ufshcd_abort()
7161 hba->req_abort_count++; in ufshcd_abort()
7164 dev_err(hba->dev, in ufshcd_abort()
7167 __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false); in ufshcd_abort()
7180 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); in ufshcd_abort()
7183 hba->force_reset = true; in ufshcd_abort()
7184 ufshcd_schedule_eh_work(hba); in ufshcd_abort()
7191 dev_err(hba->dev, "%s: skipping abort\n", __func__); in ufshcd_abort()
7192 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7196 err = ufshcd_try_to_abort_task(hba, tag); in ufshcd_abort()
7198 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); in ufshcd_abort()
7199 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); in ufshcd_abort()
7209 ufshcd_release(hba); in ufshcd_abort()
7223 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) in ufshcd_host_reset_and_restore() argument
7231 ufshpb_reset_host(hba); in ufshcd_host_reset_and_restore()
7232 ufshcd_hba_stop(hba); in ufshcd_host_reset_and_restore()
7233 hba->silence_err_logs = true; in ufshcd_host_reset_and_restore()
7234 ufshcd_retry_aborted_requests(hba); in ufshcd_host_reset_and_restore()
7235 hba->silence_err_logs = false; in ufshcd_host_reset_and_restore()
7238 ufshcd_scale_clks(hba, true); in ufshcd_host_reset_and_restore()
7240 err = ufshcd_hba_enable(hba); in ufshcd_host_reset_and_restore()
7244 err = ufshcd_probe_hba(hba, false); in ufshcd_host_reset_and_restore()
7247 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); in ufshcd_host_reset_and_restore()
7248 ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); in ufshcd_host_reset_and_restore()
7261 static int ufshcd_reset_and_restore(struct ufs_hba *hba) in ufshcd_reset_and_restore() argument
7273 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7274 saved_err = hba->saved_err; in ufshcd_reset_and_restore()
7275 saved_uic_err = hba->saved_uic_err; in ufshcd_reset_and_restore()
7276 hba->saved_err = 0; in ufshcd_reset_and_restore()
7277 hba->saved_uic_err = 0; in ufshcd_reset_and_restore()
7278 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7282 ufshcd_device_reset(hba); in ufshcd_reset_and_restore()
7284 err = ufshcd_host_reset_and_restore(hba); in ufshcd_reset_and_restore()
7287 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7292 scsi_report_bus_reset(hba->host, 0); in ufshcd_reset_and_restore()
7294 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_reset_and_restore()
7295 hba->saved_err |= saved_err; in ufshcd_reset_and_restore()
7296 hba->saved_uic_err |= saved_uic_err; in ufshcd_reset_and_restore()
7298 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_reset_and_restore()
7313 struct ufs_hba *hba; in ufshcd_eh_host_reset_handler() local
7315 hba = shost_priv(cmd->device->host); in ufshcd_eh_host_reset_handler()
7324 if (hba->pm_op_in_progress) { in ufshcd_eh_host_reset_handler()
7325 if (ufshcd_link_recovery(hba)) in ufshcd_eh_host_reset_handler()
7331 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7332 hba->force_reset = true; in ufshcd_eh_host_reset_handler()
7333 ufshcd_schedule_eh_work(hba); in ufshcd_eh_host_reset_handler()
7334 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); in ufshcd_eh_host_reset_handler()
7335 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7337 flush_work(&hba->eh_work); in ufshcd_eh_host_reset_handler()
7339 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7340 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) in ufshcd_eh_host_reset_handler()
7342 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_eh_host_reset_handler()
7401 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, in ufshcd_find_max_sup_active_icc_level() argument
7406 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || in ufshcd_find_max_sup_active_icc_level()
7407 !hba->vreg_info.vccq2) { in ufshcd_find_max_sup_active_icc_level()
7414 dev_dbg(hba->dev, in ufshcd_find_max_sup_active_icc_level()
7420 if (hba->vreg_info.vcc->max_uA) in ufshcd_find_max_sup_active_icc_level()
7422 hba->vreg_info.vcc->max_uA, in ufshcd_find_max_sup_active_icc_level()
7426 if (hba->vreg_info.vccq->max_uA) in ufshcd_find_max_sup_active_icc_level()
7428 hba->vreg_info.vccq->max_uA, in ufshcd_find_max_sup_active_icc_level()
7432 if (hba->vreg_info.vccq2->max_uA) in ufshcd_find_max_sup_active_icc_level()
7434 hba->vreg_info.vccq2->max_uA, in ufshcd_find_max_sup_active_icc_level()
7441 static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) in ufshcd_set_active_icc_lvl() argument
7444 int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; in ufshcd_set_active_icc_lvl()
7452 ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, in ufshcd_set_active_icc_lvl()
7455 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7461 icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, in ufshcd_set_active_icc_lvl()
7463 dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); in ufshcd_set_active_icc_lvl()
7465 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_active_icc_lvl()
7469 dev_err(hba->dev, in ufshcd_set_active_icc_lvl()
7513 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) in ufshcd_scsi_add_wlus() argument
7518 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7520 if (IS_ERR(hba->sdev_ufs_device)) { in ufshcd_scsi_add_wlus()
7521 ret = PTR_ERR(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7522 hba->sdev_ufs_device = NULL; in ufshcd_scsi_add_wlus()
7525 scsi_device_put(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7527 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7529 if (IS_ERR(hba->sdev_rpmb)) { in ufshcd_scsi_add_wlus()
7530 ret = PTR_ERR(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7533 ufshcd_blk_pm_runtime_init(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7534 scsi_device_put(hba->sdev_rpmb); in ufshcd_scsi_add_wlus()
7536 sdev_boot = __scsi_add_device(hba->host, 0, 0, in ufshcd_scsi_add_wlus()
7539 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__); in ufshcd_scsi_add_wlus()
7547 scsi_remove_device(hba->sdev_ufs_device); in ufshcd_scsi_add_wlus()
7552 static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) in ufshcd_wb_probe() argument
7554 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_wb_probe()
7559 if (!ufshcd_is_wb_allowed(hba)) in ufshcd_wb_probe()
7568 (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) in ufshcd_wb_probe()
7571 if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < in ufshcd_wb_probe()
7598 ufshcd_read_unit_desc_param(hba, in ufshcd_wb_probe()
7615 hba->caps &= ~UFSHCD_CAP_WB_EN; in ufshcd_wb_probe()
7618 static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf) in ufshcd_temp_notif_probe() argument
7620 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_temp_notif_probe()
7624 if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300) in ufshcd_temp_notif_probe()
7636 ufshcd_enable_ee(hba, mask); in ufshcd_temp_notif_probe()
7637 ufs_hwmon_probe(hba, mask); in ufshcd_temp_notif_probe()
7641 void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups) in ufshcd_fixup_dev_quirks() argument
7644 struct ufs_dev_info *dev_info = &hba->dev_info; in ufshcd_fixup_dev_quirks()
7655 hba->dev_quirks |= f->quirk; in ufshcd_fixup_dev_quirks()
7660 static void ufs_fixup_device_setup(struct ufs_hba *hba) in ufs_fixup_device_setup() argument
7663 ufshcd_fixup_dev_quirks(hba, ufs_fixups); in ufs_fixup_device_setup()
7666 ufshcd_vops_fixup_dev_quirks(hba); in ufs_fixup_device_setup()
7669 static int ufs_get_device_desc(struct ufs_hba *hba) in ufs_get_device_desc() argument
7675 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_get_device_desc()
7683 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, in ufs_get_device_desc()
7684 hba->desc_size[QUERY_DESC_IDN_DEVICE]); in ufs_get_device_desc()
7686 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", in ufs_get_device_desc()
7709 ufshpb_get_dev_info(hba, desc_buf); in ufs_get_device_desc()
7711 if (!ufshpb_is_legacy(hba)) in ufs_get_device_desc()
7712 err = ufshcd_query_flag_retry(hba, in ufs_get_device_desc()
7717 if (ufshpb_is_legacy(hba) || (!err && hpb_en)) in ufs_get_device_desc()
7721 err = ufshcd_read_string_desc(hba, model_index, in ufs_get_device_desc()
7724 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", in ufs_get_device_desc()
7729 hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] + in ufs_get_device_desc()
7732 ufs_fixup_device_setup(hba); in ufs_get_device_desc()
7734 ufshcd_wb_probe(hba, desc_buf); in ufs_get_device_desc()
7736 ufshcd_temp_notif_probe(hba, desc_buf); in ufs_get_device_desc()
7749 static void ufs_put_device_desc(struct ufs_hba *hba) in ufs_put_device_desc() argument
7751 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_put_device_desc()
7768 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba) in ufshcd_tune_pa_tactivate() argument
7773 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_tactivate()
7785 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_tune_pa_tactivate()
7803 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba) in ufshcd_tune_pa_hibern8time() argument
7809 ret = ufshcd_dme_get(hba, in ufshcd_tune_pa_hibern8time()
7816 ret = ufshcd_dme_peer_get(hba, in ufshcd_tune_pa_hibern8time()
7828 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), in ufshcd_tune_pa_hibern8time()
7845 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba) in ufshcd_quirk_tune_host_pa_tactivate() argument
7853 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7858 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufshcd_quirk_tune_host_pa_tactivate()
7865 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7872 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d", in ufshcd_quirk_tune_host_pa_tactivate()
7877 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); in ufshcd_quirk_tune_host_pa_tactivate()
7881 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
7896 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), in ufshcd_quirk_tune_host_pa_tactivate()
7904 static void ufshcd_tune_unipro_params(struct ufs_hba *hba) in ufshcd_tune_unipro_params() argument
7906 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) { in ufshcd_tune_unipro_params()
7907 ufshcd_tune_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7908 ufshcd_tune_pa_hibern8time(hba); in ufshcd_tune_unipro_params()
7911 ufshcd_vops_apply_dev_quirks(hba); in ufshcd_tune_unipro_params()
7913 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7915 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10); in ufshcd_tune_unipro_params()
7917 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE) in ufshcd_tune_unipro_params()
7918 ufshcd_quirk_tune_host_pa_tactivate(hba); in ufshcd_tune_unipro_params()
7921 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) in ufshcd_clear_dbg_ufs_stats() argument
7923 hba->ufs_stats.hibern8_exit_cnt = 0; in ufshcd_clear_dbg_ufs_stats()
7924 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); in ufshcd_clear_dbg_ufs_stats()
7925 hba->req_abort_count = 0; in ufshcd_clear_dbg_ufs_stats()
7928 static int ufshcd_device_geo_params_init(struct ufs_hba *hba) in ufshcd_device_geo_params_init() argument
7934 buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; in ufshcd_device_geo_params_init()
7941 err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, in ufshcd_device_geo_params_init()
7944 dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n", in ufshcd_device_geo_params_init()
7950 hba->dev_info.max_lu_supported = 32; in ufshcd_device_geo_params_init()
7952 hba->dev_info.max_lu_supported = 8; in ufshcd_device_geo_params_init()
7954 if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= in ufshcd_device_geo_params_init()
7956 ufshpb_get_geo_info(hba, desc_buf); in ufshcd_device_geo_params_init()
7983 void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk) in ufshcd_parse_dev_ref_clk_freq() argument
7989 hba->dev_ref_clk_freq = in ufshcd_parse_dev_ref_clk_freq()
7992 if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL) in ufshcd_parse_dev_ref_clk_freq()
7993 dev_err(hba->dev, in ufshcd_parse_dev_ref_clk_freq()
7997 static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) in ufshcd_set_dev_ref_clk() argument
8001 u32 freq = hba->dev_ref_clk_freq; in ufshcd_set_dev_ref_clk()
8003 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, in ufshcd_set_dev_ref_clk()
8007 dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n", in ufshcd_set_dev_ref_clk()
8015 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, in ufshcd_set_dev_ref_clk()
8019 dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n", in ufshcd_set_dev_ref_clk()
8024 dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n", in ufshcd_set_dev_ref_clk()
8031 static int ufshcd_device_params_init(struct ufs_hba *hba) in ufshcd_device_params_init() argument
8038 hba->desc_size[i] = QUERY_DESC_MAX_SIZE; in ufshcd_device_params_init()
8041 ret = ufshcd_device_geo_params_init(hba); in ufshcd_device_params_init()
8046 ret = ufs_get_device_desc(hba); in ufshcd_device_params_init()
8048 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", in ufshcd_device_params_init()
8053 ufshcd_get_ref_clk_gating_wait(hba); in ufshcd_device_params_init()
8055 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, in ufshcd_device_params_init()
8057 hba->dev_info.f_power_on_wp_en = flag; in ufshcd_device_params_init()
8060 if (ufshcd_get_max_pwr_mode(hba)) in ufshcd_device_params_init()
8061 dev_err(hba->dev, in ufshcd_device_params_init()
8072 static int ufshcd_add_lus(struct ufs_hba *hba) in ufshcd_add_lus() argument
8077 ret = ufshcd_scsi_add_wlus(hba); in ufshcd_add_lus()
8082 if (ufshcd_is_clkscaling_supported(hba)) { in ufshcd_add_lus()
8083 memcpy(&hba->clk_scaling.saved_pwr_info.info, in ufshcd_add_lus()
8084 &hba->pwr_info, in ufshcd_add_lus()
8086 hba->clk_scaling.saved_pwr_info.is_valid = true; in ufshcd_add_lus()
8087 hba->clk_scaling.is_allowed = true; in ufshcd_add_lus()
8089 ret = ufshcd_devfreq_init(hba); in ufshcd_add_lus()
8093 hba->clk_scaling.is_enabled = true; in ufshcd_add_lus()
8094 ufshcd_init_clk_scaling_sysfs(hba); in ufshcd_add_lus()
8097 ufs_bsg_probe(hba); in ufshcd_add_lus()
8098 ufshpb_init(hba); in ufshcd_add_lus()
8099 scsi_scan_host(hba->host); in ufshcd_add_lus()
8112 static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) in ufshcd_probe_hba() argument
8118 hba->ufshcd_state = UFSHCD_STATE_RESET; in ufshcd_probe_hba()
8120 ret = ufshcd_link_startup(hba); in ufshcd_probe_hba()
8124 if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) in ufshcd_probe_hba()
8128 ufshcd_clear_dbg_ufs_stats(hba); in ufshcd_probe_hba()
8131 ufshcd_set_link_active(hba); in ufshcd_probe_hba()
8134 ret = ufshcd_verify_dev_init(hba); in ufshcd_probe_hba()
8139 ret = ufshcd_complete_dev_init(hba); in ufshcd_probe_hba()
8148 ret = ufshcd_device_params_init(hba); in ufshcd_probe_hba()
8153 ufshcd_tune_unipro_params(hba); in ufshcd_probe_hba()
8156 ufshcd_set_ufs_dev_active(hba); in ufshcd_probe_hba()
8157 ufshcd_force_reset_auto_bkops(hba); in ufshcd_probe_hba()
8160 if (hba->max_pwr_info.is_valid) { in ufshcd_probe_hba()
8165 if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL) in ufshcd_probe_hba()
8166 ufshcd_set_dev_ref_clk(hba); in ufshcd_probe_hba()
8167 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); in ufshcd_probe_hba()
8169 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", in ufshcd_probe_hba()
8173 ufshcd_print_pwr_info(hba); in ufshcd_probe_hba()
8182 ufshcd_set_active_icc_lvl(hba); in ufshcd_probe_hba()
8184 ufshcd_wb_config(hba); in ufshcd_probe_hba()
8185 if (hba->ee_usr_mask) in ufshcd_probe_hba()
8186 ufshcd_write_ee_control(hba); in ufshcd_probe_hba()
8188 ufshcd_auto_hibern8_enable(hba); in ufshcd_probe_hba()
8190 ufshpb_reset(hba); in ufshcd_probe_hba()
8192 trace_android_rvh_ufs_complete_init(hba); in ufshcd_probe_hba()
8194 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_probe_hba()
8196 hba->ufshcd_state = UFSHCD_STATE_ERROR; in ufshcd_probe_hba()
8197 else if (hba->ufshcd_state == UFSHCD_STATE_RESET) in ufshcd_probe_hba()
8198 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; in ufshcd_probe_hba()
8199 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_probe_hba()
8201 trace_ufshcd_init(dev_name(hba->dev), ret, in ufshcd_probe_hba()
8203 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_probe_hba()
8214 struct ufs_hba *hba = (struct ufs_hba *)data; in ufshcd_async_scan() local
8217 down(&hba->host_sem); in ufshcd_async_scan()
8219 ret = ufshcd_probe_hba(hba, true); in ufshcd_async_scan()
8220 up(&hba->host_sem); in ufshcd_async_scan()
8225 ret = ufshcd_add_lus(hba); in ufshcd_async_scan()
8228 pm_runtime_put_sync(hba->dev); in ufshcd_async_scan()
8231 dev_err(hba->dev, "%s failed: %d\n", __func__, ret); in ufshcd_async_scan()
8304 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, in ufshcd_config_vreg_lpm() argument
8307 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); in ufshcd_config_vreg_lpm()
8310 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, in ufshcd_config_vreg_hpm() argument
8316 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); in ufshcd_config_vreg_hpm()
8392 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_vreg() argument
8395 struct device *dev = hba->dev; in ufshcd_setup_vreg()
8396 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_vreg()
8417 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) in ufshcd_setup_hba_vreg() argument
8419 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_setup_hba_vreg()
8421 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); in ufshcd_setup_hba_vreg()
8442 static int ufshcd_init_vreg(struct ufs_hba *hba) in ufshcd_init_vreg() argument
8445 struct device *dev = hba->dev; in ufshcd_init_vreg()
8446 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_vreg()
8459 static int ufshcd_init_hba_vreg(struct ufs_hba *hba) in ufshcd_init_hba_vreg() argument
8461 struct ufs_vreg_info *info = &hba->vreg_info; in ufshcd_init_hba_vreg()
8464 return ufshcd_get_vreg(hba->dev, info->vdd_hba); in ufshcd_init_hba_vreg()
8469 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) in ufshcd_setup_clocks() argument
8473 struct list_head *head = &hba->clk_list_head; in ufshcd_setup_clocks()
8481 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); in ufshcd_setup_clocks()
8491 if (ufshcd_is_link_active(hba) && in ufshcd_setup_clocks()
8499 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", in ufshcd_setup_clocks()
8507 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, in ufshcd_setup_clocks()
8512 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); in ufshcd_setup_clocks()
8523 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8524 hba->clk_gating.state = CLKS_ON; in ufshcd_setup_clocks()
8525 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
8526 hba->clk_gating.state); in ufshcd_setup_clocks()
8527 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_setup_clocks()
8531 trace_ufshcd_profile_clk_gating(dev_name(hba->dev), in ufshcd_setup_clocks()
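The ufshcd_setup_clocks() fragments above only hint at its structure, but the usual pattern for such a routine is to walk a clock list, enable each clock in order, and roll back the ones already enabled if a later one fails. A toy C sketch of that enable-or-roll-back pattern, under that assumption and with made-up clock names:

#include <stdio.h>

#define DEMO_NCLKS 3

static int demo_clk_enable(int i)	{ return i == 2 ? -1 : 0; } /* clk 2 fails */
static void demo_clk_disable(int i)	{ printf("disable clk%d\n", i); }

static int demo_setup_clocks(int on)
{
	int i, ret = 0;

	for (i = 0; i < DEMO_NCLKS; i++) {
		if (!on) {
			demo_clk_disable(i);
			continue;
		}
		ret = demo_clk_enable(i);
		if (ret)
			break;
		printf("enabled clk%d\n", i);
	}
	if (ret)
		while (i--)
			demo_clk_disable(i);	/* roll back partial enables */
	return ret;
}

int main(void)
{
	printf("setup returned %d\n", demo_setup_clocks(1));
	return 0;
}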
8537 static int ufshcd_init_clocks(struct ufs_hba *hba) in ufshcd_init_clocks() argument
8541 struct device *dev = hba->dev; in ufshcd_init_clocks()
8542 struct list_head *head = &hba->clk_list_head; in ufshcd_init_clocks()
8565 ufshcd_parse_dev_ref_clk_freq(hba, clki->clk); in ufshcd_init_clocks()
8570 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", in ufshcd_init_clocks()
8584 static int ufshcd_variant_hba_init(struct ufs_hba *hba) in ufshcd_variant_hba_init() argument
8588 if (!hba->vops) in ufshcd_variant_hba_init()
8591 err = ufshcd_vops_init(hba); in ufshcd_variant_hba_init()
8593 dev_err(hba->dev, "%s: variant %s init failed err %d\n", in ufshcd_variant_hba_init()
8594 __func__, ufshcd_get_var_name(hba), err); in ufshcd_variant_hba_init()
8599 static void ufshcd_variant_hba_exit(struct ufs_hba *hba) in ufshcd_variant_hba_exit() argument
8601 if (!hba->vops) in ufshcd_variant_hba_exit()
8604 ufshcd_vops_exit(hba); in ufshcd_variant_hba_exit()
8607 static int ufshcd_hba_init(struct ufs_hba *hba) in ufshcd_hba_init() argument
8618 err = ufshcd_init_hba_vreg(hba); in ufshcd_hba_init()
8622 err = ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_init()
8626 err = ufshcd_init_clocks(hba); in ufshcd_hba_init()
8630 err = ufshcd_setup_clocks(hba, true); in ufshcd_hba_init()
8634 err = ufshcd_init_vreg(hba); in ufshcd_hba_init()
8638 err = ufshcd_setup_vreg(hba, true); in ufshcd_hba_init()
8642 err = ufshcd_variant_hba_init(hba); in ufshcd_hba_init()
8646 ufs_debugfs_hba_init(hba); in ufshcd_hba_init()
8648 hba->is_powered = true; in ufshcd_hba_init()
8652 ufshcd_setup_vreg(hba, false); in ufshcd_hba_init()
8654 ufshcd_setup_clocks(hba, false); in ufshcd_hba_init()
8656 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_init()
8661 static void ufshcd_hba_exit(struct ufs_hba *hba) in ufshcd_hba_exit() argument
8663 if (hba->is_powered) { in ufshcd_hba_exit()
8664 ufshcd_exit_clk_scaling(hba); in ufshcd_hba_exit()
8665 ufshcd_exit_clk_gating(hba); in ufshcd_hba_exit()
8666 if (hba->eh_wq) in ufshcd_hba_exit()
8667 destroy_workqueue(hba->eh_wq); in ufshcd_hba_exit()
8668 ufs_debugfs_hba_exit(hba); in ufshcd_hba_exit()
8669 ufshcd_variant_hba_exit(hba); in ufshcd_hba_exit()
8670 ufshcd_setup_vreg(hba, false); in ufshcd_hba_exit()
8671 ufshcd_setup_clocks(hba, false); in ufshcd_hba_exit()
8672 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_exit()
8673 hba->is_powered = false; in ufshcd_hba_exit()
8674 ufs_put_device_desc(hba); in ufshcd_hba_exit()
8687 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, in ufshcd_set_dev_pwr_mode() argument
8696 spin_lock_irqsave(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8697 sdp = hba->sdev_ufs_device; in ufshcd_set_dev_pwr_mode()
8707 spin_unlock_irqrestore(hba->host->host_lock, flags); in ufshcd_set_dev_pwr_mode()
8718 hba->host->eh_noresume = 1; in ufshcd_set_dev_pwr_mode()
8749 hba->curr_dev_pwr_mode = pwr_mode; in ufshcd_set_dev_pwr_mode()
8752 hba->host->eh_noresume = 0; in ufshcd_set_dev_pwr_mode()
8756 static int ufshcd_link_state_transition(struct ufs_hba *hba, in ufshcd_link_state_transition() argument
8762 if (req_link_state == hba->uic_link_state) in ufshcd_link_state_transition()
8766 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8768 ufshcd_set_link_hibern8(hba); in ufshcd_link_state_transition()
8770 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8781 (!check_for_bkops || !hba->auto_bkops_enabled)) { in ufshcd_link_state_transition()
8792 ret = ufshcd_uic_hibern8_enter(hba); in ufshcd_link_state_transition()
8794 dev_err(hba->dev, "%s: hibern8 enter failed %d\n", in ufshcd_link_state_transition()
8802 ufshcd_hba_stop(hba); in ufshcd_link_state_transition()
8807 ufshcd_set_link_off(hba); in ufshcd_link_state_transition()
8814 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_vreg_set_lpm() argument
8824 if (!ufshcd_is_link_active(hba) && in ufshcd_vreg_set_lpm()
8825 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM) in ufshcd_vreg_set_lpm()
8843 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_lpm()
8844 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_lpm()
8845 ufshcd_setup_vreg(hba, false); in ufshcd_vreg_set_lpm()
8847 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_lpm()
8848 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_lpm()
8850 if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) { in ufshcd_vreg_set_lpm()
8851 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_lpm()
8852 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_lpm()
8859 if (vcc_off && hba->vreg_info.vcc && in ufshcd_vreg_set_lpm()
8860 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM) in ufshcd_vreg_set_lpm()
8865 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_vreg_set_hpm() argument
8869 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && in ufshcd_vreg_set_hpm()
8870 !hba->dev_info.is_lu_power_on_wp) { in ufshcd_vreg_set_hpm()
8871 ret = ufshcd_setup_vreg(hba, true); in ufshcd_vreg_set_hpm()
8872 } else if (!ufshcd_is_ufs_dev_active(hba)) { in ufshcd_vreg_set_hpm()
8873 if (!ufshcd_is_link_active(hba)) { in ufshcd_vreg_set_hpm()
8874 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8877 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); in ufshcd_vreg_set_hpm()
8881 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); in ufshcd_vreg_set_hpm()
8886 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); in ufshcd_vreg_set_hpm()
8888 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); in ufshcd_vreg_set_hpm()
8894 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_lpm() argument
8896 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_lpm()
8897 ufshcd_setup_hba_vreg(hba, false); in ufshcd_hba_vreg_set_lpm()
8900 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) in ufshcd_hba_vreg_set_hpm() argument
8902 if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba)) in ufshcd_hba_vreg_set_hpm()
8903 ufshcd_setup_hba_vreg(hba, true); in ufshcd_hba_vreg_set_hpm()
8906 static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_suspend() argument
8914 hba->pm_op_in_progress = true; in __ufshcd_wl_suspend()
8917 hba->rpm_lvl : hba->spm_lvl; in __ufshcd_wl_suspend()
8925 ufshpb_suspend(hba); in __ufshcd_wl_suspend()
8931 ufshcd_hold(hba, false); in __ufshcd_wl_suspend()
8932 hba->clk_gating.is_suspended = true; in __ufshcd_wl_suspend()
8934 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
8935 ufshcd_clk_scaling_suspend(hba, true); in __ufshcd_wl_suspend()
8942 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && in __ufshcd_wl_suspend()
8943 (req_link_state == hba->uic_link_state)) in __ufshcd_wl_suspend()
8947 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { in __ufshcd_wl_suspend()
8953 if (ufshcd_can_autobkops_during_suspend(hba)) { in __ufshcd_wl_suspend()
8959 ret = ufshcd_urgent_bkops(hba); in __ufshcd_wl_suspend()
8966 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
8972 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
8979 hba->dev_info.b_rpm_dev_flush_capable = in __ufshcd_wl_suspend()
8980 hba->auto_bkops_enabled || in __ufshcd_wl_suspend()
8983 ufshcd_is_auto_hibern8_enabled(hba))) && in __ufshcd_wl_suspend()
8984 ufshcd_wb_need_flush(hba)); in __ufshcd_wl_suspend()
8987 flush_work(&hba->eeh_work); in __ufshcd_wl_suspend()
8989 ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_suspend()
8993 if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) { in __ufshcd_wl_suspend()
8996 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
8998 if (!hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
8999 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); in __ufshcd_wl_suspend()
9006 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9018 check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba); in __ufshcd_wl_suspend()
9019 ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops); in __ufshcd_wl_suspend()
9026 ufshcd_force_error_recovery(hba); in __ufshcd_wl_suspend()
9038 ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_suspend()
9049 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9050 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9051 WARN_ON(!ufshcd_is_link_off(hba)); in __ufshcd_wl_suspend()
9053 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) in __ufshcd_wl_suspend()
9054 ufshcd_set_link_active(hba); in __ufshcd_wl_suspend()
9055 else if (ufshcd_is_link_off(hba)) in __ufshcd_wl_suspend()
9056 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9059 if (ufshcd_is_ufs_dev_deepsleep(hba)) { in __ufshcd_wl_suspend()
9060 ufshcd_device_reset(hba); in __ufshcd_wl_suspend()
9061 ufshcd_host_reset_and_restore(hba); in __ufshcd_wl_suspend()
9063 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) in __ufshcd_wl_suspend()
9064 ufshcd_disable_auto_bkops(hba); in __ufshcd_wl_suspend()
9066 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_suspend()
9067 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_suspend()
9069 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_suspend()
9071 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_suspend()
9072 schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, in __ufshcd_wl_suspend()
9077 ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); in __ufshcd_wl_suspend()
9078 hba->clk_gating.is_suspended = false; in __ufshcd_wl_suspend()
9079 ufshcd_release(hba); in __ufshcd_wl_suspend()
9080 ufshpb_resume(hba); in __ufshcd_wl_suspend()
9082 hba->pm_op_in_progress = false; in __ufshcd_wl_suspend()
9087 static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in __ufshcd_wl_resume() argument
9090 enum uic_link_state old_link_state = hba->uic_link_state; in __ufshcd_wl_resume()
9092 hba->pm_op_in_progress = true; in __ufshcd_wl_resume()
9099 ret = ufshcd_vops_resume(hba, pm_op); in __ufshcd_wl_resume()
9104 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba)); in __ufshcd_wl_resume()
9106 if (ufshcd_is_link_hibern8(hba)) { in __ufshcd_wl_resume()
9107 ret = ufshcd_uic_hibern8_exit(hba); in __ufshcd_wl_resume()
9109 ufshcd_set_link_active(hba); in __ufshcd_wl_resume()
9111 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in __ufshcd_wl_resume()
9115 } else if (ufshcd_is_link_off(hba)) { in __ufshcd_wl_resume()
9122 ret = ufshcd_reset_and_restore(hba); in __ufshcd_wl_resume()
9127 if (ret || !ufshcd_is_link_active(hba)) in __ufshcd_wl_resume()
9131 if (!ufshcd_is_ufs_dev_active(hba)) { in __ufshcd_wl_resume()
9132 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); in __ufshcd_wl_resume()
9137 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) in __ufshcd_wl_resume()
9138 ufshcd_enable_auto_bkops(hba); in __ufshcd_wl_resume()
9144 ufshcd_urgent_bkops(hba); in __ufshcd_wl_resume()
9146 if (hba->ee_usr_mask) in __ufshcd_wl_resume()
9147 ufshcd_write_ee_control(hba); in __ufshcd_wl_resume()
9149 if (ufshcd_is_clkscaling_supported(hba)) in __ufshcd_wl_resume()
9150 ufshcd_clk_scaling_suspend(hba, false); in __ufshcd_wl_resume()
9152 if (hba->dev_info.b_rpm_dev_flush_capable) { in __ufshcd_wl_resume()
9153 hba->dev_info.b_rpm_dev_flush_capable = false; in __ufshcd_wl_resume()
9154 cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); in __ufshcd_wl_resume()
9158 ufshcd_auto_hibern8_enable(hba); in __ufshcd_wl_resume()
9160 ufshpb_resume(hba); in __ufshcd_wl_resume()
9164 ufshcd_link_state_transition(hba, old_link_state, 0); in __ufshcd_wl_resume()
9166 ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE); in __ufshcd_wl_resume()
9167 ufshcd_vops_suspend(hba, pm_op, POST_CHANGE); in __ufshcd_wl_resume()
9170 ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret); in __ufshcd_wl_resume()
9171 hba->clk_gating.is_suspended = false; in __ufshcd_wl_resume()
9172 ufshcd_release(hba); in __ufshcd_wl_resume()
9173 hba->pm_op_in_progress = false; in __ufshcd_wl_resume()
9180 struct ufs_hba *hba; in ufshcd_wl_runtime_suspend() local
9184 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_suspend()
9186 ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_suspend()
9192 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_suspend()
9200 struct ufs_hba *hba; in ufshcd_wl_runtime_resume() local
9204 hba = shost_priv(sdev->host); in ufshcd_wl_runtime_resume()
9206 ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM); in ufshcd_wl_runtime_resume()
9212 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_runtime_resume()
9222 struct ufs_hba *hba; in ufshcd_wl_suspend() local
9226 hba = shost_priv(sdev->host); in ufshcd_wl_suspend()
9227 down(&hba->host_sem); in ufshcd_wl_suspend()
9232 ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM); in ufshcd_wl_suspend()
9235 up(&hba->host_sem); in ufshcd_wl_suspend()
9240 hba->is_sys_suspended = true; in ufshcd_wl_suspend()
9243 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_suspend()
9251 struct ufs_hba *hba; in ufshcd_wl_resume() local
9255 hba = shost_priv(sdev->host); in ufshcd_wl_resume()
9260 ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM); in ufshcd_wl_resume()
9266 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_wl_resume()
9268 hba->is_sys_suspended = false; in ufshcd_wl_resume()
9269 up(&hba->host_sem); in ufshcd_wl_resume()
9277 struct ufs_hba *hba; in ufshcd_wl_shutdown() local
9279 hba = shost_priv(sdev->host); in ufshcd_wl_shutdown()
9281 down(&hba->host_sem); in ufshcd_wl_shutdown()
9282 hba->shutting_down = true; in ufshcd_wl_shutdown()
9283 up(&hba->host_sem); in ufshcd_wl_shutdown()
9286 ufshcd_rpm_get_sync(hba); in ufshcd_wl_shutdown()
9288 shost_for_each_device(sdev, hba->host) { in ufshcd_wl_shutdown()
9289 if (sdev == hba->sdev_ufs_device) in ufshcd_wl_shutdown()
9293 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_shutdown()
9303 static int ufshcd_suspend(struct ufs_hba *hba) in ufshcd_suspend() argument
9307 if (!hba->is_powered) in ufshcd_suspend()
9313 ufshcd_disable_irq(hba); in ufshcd_suspend()
9314 ret = ufshcd_setup_clocks(hba, false); in ufshcd_suspend()
9316 ufshcd_enable_irq(hba); in ufshcd_suspend()
9319 if (ufshcd_is_clkgating_allowed(hba)) { in ufshcd_suspend()
9320 hba->clk_gating.state = CLKS_OFF; in ufshcd_suspend()
9321 trace_ufshcd_clk_gating(dev_name(hba->dev), in ufshcd_suspend()
9322 hba->clk_gating.state); in ufshcd_suspend()
9325 ufshcd_vreg_set_lpm(hba); in ufshcd_suspend()
9327 ufshcd_hba_vreg_set_lpm(hba); in ufshcd_suspend()
9341 static int ufshcd_resume(struct ufs_hba *hba) in ufshcd_resume() argument
9345 if (!hba->is_powered) in ufshcd_resume()
9348 ufshcd_hba_vreg_set_hpm(hba); in ufshcd_resume()
9349 ret = ufshcd_vreg_set_hpm(hba); in ufshcd_resume()
9354 ret = ufshcd_setup_clocks(hba, true); in ufshcd_resume()
9359 ufshcd_enable_irq(hba); in ufshcd_resume()
9363 ufshcd_vreg_set_lpm(hba); in ufshcd_resume()
9366 ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); in ufshcd_resume()
9383 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_suspend() local
9387 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_suspend()
9390 ret = ufshcd_suspend(hba); in ufshcd_system_suspend()
9392 trace_ufshcd_system_suspend(dev_name(hba->dev), ret, in ufshcd_system_suspend()
9394 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_suspend()
9410 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_resume() local
9414 if (pm_runtime_suspended(hba->dev)) in ufshcd_system_resume()
9417 ret = ufshcd_resume(hba); in ufshcd_system_resume()
9420 trace_ufshcd_system_resume(dev_name(hba->dev), ret, in ufshcd_system_resume()
9422 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_system_resume()
9440 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_suspend() local
9444 ret = ufshcd_suspend(hba); in ufshcd_runtime_suspend()
9446 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret, in ufshcd_runtime_suspend()
9448 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_suspend()
9465 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_runtime_resume() local
9469 ret = ufshcd_resume(hba); in ufshcd_runtime_resume()
9471 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret, in ufshcd_runtime_resume()
9473 hba->curr_dev_pwr_mode, hba->uic_link_state); in ufshcd_runtime_resume()
9488 int ufshcd_shutdown(struct ufs_hba *hba) in ufshcd_shutdown() argument
9490 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) in ufshcd_shutdown()
9491 ufshcd_suspend(hba); in ufshcd_shutdown()
9493 hba->is_powered = false; in ufshcd_shutdown()
9504 void ufshcd_remove(struct ufs_hba *hba) in ufshcd_remove() argument
9506 if (hba->sdev_ufs_device) in ufshcd_remove()
9507 ufshcd_rpm_get_sync(hba); in ufshcd_remove()
9508 ufs_hwmon_remove(hba); in ufshcd_remove()
9509 ufs_bsg_remove(hba); in ufshcd_remove()
9510 ufshpb_remove(hba); in ufshcd_remove()
9511 ufs_sysfs_remove_nodes(hba->dev); in ufshcd_remove()
9512 blk_cleanup_queue(hba->tmf_queue); in ufshcd_remove()
9513 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_remove()
9514 blk_cleanup_queue(hba->cmd_queue); in ufshcd_remove()
9515 scsi_remove_host(hba->host); in ufshcd_remove()
9517 ufshcd_disable_intr(hba, hba->intr_mask); in ufshcd_remove()
9518 ufshcd_hba_stop(hba); in ufshcd_remove()
9519 ufshcd_hba_exit(hba); in ufshcd_remove()
9535 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_system_restore() local
9543 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), in ufshcd_system_restore()
9545 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr), in ufshcd_system_restore()
9547 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr), in ufshcd_system_restore()
9549 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr), in ufshcd_system_restore()
9559 ufshcd_set_link_off(hba); in ufshcd_system_restore()
9577 void ufshcd_dealloc_host(struct ufs_hba *hba) in ufshcd_dealloc_host() argument
9579 scsi_host_put(hba->host); in ufshcd_dealloc_host()
9590 static int ufshcd_set_dma_mask(struct ufs_hba *hba) in ufshcd_set_dma_mask() argument
9592 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { in ufshcd_set_dma_mask()
9593 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) in ufshcd_set_dma_mask()
9596 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); in ufshcd_set_dma_mask()
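ufshcd_set_dma_mask() requests a 64-bit DMA mask only when the controller capabilities advertise 64-bit addressing, and otherwise (or if that request fails) falls back to a 32-bit mask. A hedged, standalone sketch of that capability-gated fallback follows; try_set_dma_mask() and the capability bit value are illustrative stand-ins for dma_set_mask_and_coherent() and MASK_64_ADDRESSING_SUPPORT.

#include <stdint.h>
#include <stdio.h>

#define CAP_64BIT_ADDRESSING	(1u << 24)	/* illustrative bit, not the real register layout */

/* Hypothetical stand-in for dma_set_mask_and_coherent(); returns 0 on success. */
static int try_set_dma_mask(unsigned int bits)
{
	printf("requesting %u-bit DMA mask\n", bits);
	return 0;
}

/* Prefer 64-bit addressing when the capability bit is set; otherwise,
 * or if the 64-bit request is rejected, fall back to a 32-bit mask. */
static int set_dma_mask(uint32_t capabilities)
{
	if (capabilities & CAP_64BIT_ADDRESSING) {
		if (!try_set_dma_mask(64))
			return 0;
	}
	return try_set_dma_mask(32);
}

int main(void)
{
	return set_dma_mask(CAP_64BIT_ADDRESSING);
}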
9608 struct ufs_hba *hba; in ufshcd_alloc_host() local
9625 hba = shost_priv(host); in ufshcd_alloc_host()
9626 hba->host = host; in ufshcd_alloc_host()
9627 hba->dev = dev; in ufshcd_alloc_host()
9628 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; in ufshcd_alloc_host()
9629 hba->nop_out_timeout = NOP_OUT_TIMEOUT; in ufshcd_alloc_host()
9630 hba->sg_entry_size = sizeof(struct ufshcd_sg_entry); in ufshcd_alloc_host()
9631 INIT_LIST_HEAD(&hba->clk_list_head); in ufshcd_alloc_host()
9632 spin_lock_init(&hba->outstanding_lock); in ufshcd_alloc_host()
9634 *hba_handle = hba; in ufshcd_alloc_host()
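The ufshcd_alloc_host() lines follow the usual SCSI-midlayer pattern: the Scsi_Host is allocated with trailing room for driver-private data, shost_priv() returns that embedded area, and the driver seeds its defaults there before handing the pointer back through *hba_handle. A rough standalone analogue of allocating an outer object with trailing private space is sketched below; struct generic_host and host_priv() are invented for illustration and are not kernel APIs.

#include <stdlib.h>

/* Invented stand-in for Scsi_Host: generic part plus trailing private area. */
struct generic_host {
	int id;
	unsigned char priv[];	/* driver-private data lives right behind the host */
};

struct my_hba {
	struct generic_host *host;	/* back-pointer, as in hba->host */
	int nop_out_timeout;
};

/* Analogue of shost_priv(): the private area starts after the generic struct. */
static void *host_priv(struct generic_host *host)
{
	return host->priv;
}

static struct generic_host *host_alloc(size_t privsz)
{
	return calloc(1, sizeof(struct generic_host) + privsz);
}

int main(void)
{
	struct generic_host *host = host_alloc(sizeof(struct my_hba));
	struct my_hba *hba;

	if (!host)
		return 1;
	hba = host_priv(host);
	hba->host = host;
	hba->nop_out_timeout = 50;	/* illustrative default only */
	free(host);
	return 0;
}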
9660 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) in ufshcd_init() argument
9663 struct Scsi_Host *host = hba->host; in ufshcd_init()
9664 struct device *dev = hba->dev; in ufshcd_init()
9672 dev_set_drvdata(dev, hba); in ufshcd_init()
9675 dev_err(hba->dev, in ufshcd_init()
9681 hba->mmio_base = mmio_base; in ufshcd_init()
9682 hba->irq = irq; in ufshcd_init()
9683 hba->vps = &ufs_hba_vps; in ufshcd_init()
9685 err = ufshcd_hba_init(hba); in ufshcd_init()
9690 err = ufshcd_hba_capabilities(hba); in ufshcd_init()
9695 hba->ufs_version = ufshcd_get_ufs_version(hba); in ufshcd_init()
9698 hba->intr_mask = ufshcd_get_intr_mask(hba); in ufshcd_init()
9700 err = ufshcd_set_dma_mask(hba); in ufshcd_init()
9702 dev_err(hba->dev, "set dma mask failed\n"); in ufshcd_init()
9707 err = ufshcd_memory_alloc(hba); in ufshcd_init()
9709 dev_err(hba->dev, "Memory allocation failed\n"); in ufshcd_init()
9714 ufshcd_host_memory_configure(hba); in ufshcd_init()
9716 host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9717 host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED; in ufshcd_init()
9724 hba->max_pwr_info.is_valid = false; in ufshcd_init()
9726 if (ufshcd_use_mcq_hooks(hba)) { in ufshcd_init()
9727 trace_android_vh_ufs_mcq_config(hba, &err); in ufshcd_init()
9734 hba->host->host_no); in ufshcd_init()
9735 hba->eh_wq = create_singlethread_workqueue(eh_wq_name); in ufshcd_init()
9736 if (!hba->eh_wq) { in ufshcd_init()
9737 dev_err(hba->dev, "%s: failed to create eh workqueue\n", in ufshcd_init()
9742 INIT_WORK(&hba->eh_work, ufshcd_err_handler); in ufshcd_init()
9743 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); in ufshcd_init()
9745 sema_init(&hba->host_sem, 1); in ufshcd_init()
9748 mutex_init(&hba->uic_cmd_mutex); in ufshcd_init()
9751 mutex_init(&hba->dev_cmd.lock); in ufshcd_init()
9754 mutex_init(&hba->ee_ctrl_mutex); in ufshcd_init()
9756 init_rwsem(&hba->clk_scaling_lock); in ufshcd_init()
9758 ufshcd_init_clk_gating(hba); in ufshcd_init()
9760 ufshcd_init_clk_scaling(hba); in ufshcd_init()
9767 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS), in ufshcd_init()
9769 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE); in ufshcd_init()
9777 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); in ufshcd_init()
9779 dev_err(hba->dev, "request irq failed\n"); in ufshcd_init()
9782 hba->is_irq_enabled = true; in ufshcd_init()
9785 err = scsi_add_host(host, hba->dev); in ufshcd_init()
9787 dev_err(hba->dev, "scsi_add_host failed\n"); in ufshcd_init()
9791 hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); in ufshcd_init()
9792 if (IS_ERR(hba->cmd_queue)) { in ufshcd_init()
9793 err = PTR_ERR(hba->cmd_queue); in ufshcd_init()
9797 hba->tmf_tag_set = (struct blk_mq_tag_set) { in ufshcd_init()
9799 .queue_depth = hba->nutmrs, in ufshcd_init()
9803 err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9806 hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); in ufshcd_init()
9807 if (IS_ERR(hba->tmf_queue)) { in ufshcd_init()
9808 err = PTR_ERR(hba->tmf_queue); in ufshcd_init()
9811 hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs, in ufshcd_init()
9812 sizeof(*hba->tmf_rqs), GFP_KERNEL); in ufshcd_init()
9813 if (!hba->tmf_rqs) { in ufshcd_init()
9819 ufshcd_device_reset(hba); in ufshcd_init()
9821 ufshcd_init_crypto(hba); in ufshcd_init()
9824 err = ufshcd_hba_enable(hba); in ufshcd_init()
9826 dev_err(hba->dev, "Host controller enable failed\n"); in ufshcd_init()
9827 ufshcd_print_evt_hist(hba); in ufshcd_init()
9828 ufshcd_print_host_state(hba); in ufshcd_init()
9837 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9840 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( in ufshcd_init()
9844 INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, in ufshcd_init()
9848 if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) { in ufshcd_init()
9849 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) | in ufshcd_init()
9855 atomic_set(&hba->scsi_block_reqs_cnt, 0); in ufshcd_init()
9862 ufshcd_set_ufs_dev_active(hba); in ufshcd_init()
9864 async_schedule(ufshcd_async_scan, hba); in ufshcd_init()
9865 ufs_sysfs_add_nodes(hba); in ufshcd_init()
9871 blk_cleanup_queue(hba->tmf_queue); in ufshcd_init()
9873 blk_mq_free_tag_set(&hba->tmf_tag_set); in ufshcd_init()
9875 blk_cleanup_queue(hba->cmd_queue); in ufshcd_init()
9877 scsi_remove_host(hba->host); in ufshcd_init()
9879 hba->is_irq_enabled = false; in ufshcd_init()
9880 ufshcd_hba_exit(hba); in ufshcd_init()
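The tail of ufshcd_init() above (cleaning up tmf_queue, the tmf tag set and cmd_queue, removing the SCSI host, clearing the IRQ flag and calling ufshcd_hba_exit()) is the classic goto-based unwind: resources are released in the reverse order of acquisition, and each failure point jumps to the label that frees everything acquired so far. A compact, self-contained sketch of that idiom is below; acquire_a/b/c and release_a/b are hypothetical steps standing in for the queues and tag set listed above.

#include <stdio.h>

static int  acquire_a(void) { puts("acquire a"); return 0; }
static int  acquire_b(void) { puts("acquire b"); return 0; }
static int  acquire_c(void) { puts("acquire c"); return -1; /* simulate failure */ }
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

/* Reverse-order unwind: a failure at step N jumps to the label that
 * releases steps N-1 .. 1, mirroring the cleanup sequence listed above. */
static int init_all(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto free_a;
	err = acquire_c();
	if (err)
		goto free_b;
	return 0;

free_b:
	release_b();
free_a:
	release_a();
out:
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}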
9888 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_resume_complete() local
9890 if (hba->complete_put) { in ufshcd_resume_complete()
9891 ufshcd_rpm_put(hba); in ufshcd_resume_complete()
9892 hba->complete_put = false; in ufshcd_resume_complete()
9899 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_suspend_prepare() local
9908 if (hba->sdev_ufs_device) { in ufshcd_suspend_prepare()
9909 ret = ufshcd_rpm_get_sync(hba); in ufshcd_suspend_prepare()
9911 ufshcd_rpm_put(hba); in ufshcd_suspend_prepare()
9914 hba->complete_put = true; in ufshcd_suspend_prepare()
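ufshcd_suspend_prepare() and ufshcd_resume_complete() work as a pair: prepare takes a synchronous runtime-PM reference (when the device WLUN exists), drops it again if the get failed, and records complete_put so the complete callback knows whether it owes a matching put. A minimal sketch of that get/flag/put handshake follows; rpm_get_sync() and rpm_put() are hypothetical stubs for the driver's runtime-PM wrappers.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the runtime-PM get/put helpers. */
static int  rpm_get_sync(void) { puts("rpm get"); return 0; /* >= 0 on success */ }
static void rpm_put(void)      { puts("rpm put"); }

static bool complete_put;	/* remembers whether the complete callback owes a put */

static int suspend_prepare(void)
{
	int ret = rpm_get_sync();

	if (ret < 0) {
		rpm_put();	/* the reference is held even when the get reports an error */
		return ret;
	}
	complete_put = true;
	return 0;
}

static void resume_complete(void)
{
	if (complete_put) {
		rpm_put();
		complete_put = false;
	}
}

int main(void)
{
	if (suspend_prepare())
		return 1;
	resume_complete();
	return 0;
}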
9924 struct ufs_hba *hba = shost_priv(sdev->host); in ufshcd_wl_poweroff() local
9926 __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); in ufshcd_wl_poweroff()