Lines matching refs: hba  (symbol cross-reference of the Qualcomm UFS host controller driver, ufs-qcom.c)
50 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
111 struct device *dev = host->hba->dev; in ufs_qcom_enable_lane_clks()
153 struct device *dev = host->hba->dev; in ufs_qcom_init_lane_clks()
169 if (host->hba->lanes_per_direction > 1) { in ufs_qcom_init_lane_clks()
182 static int ufs_qcom_check_hibern8(struct ufs_hba *hba) in ufs_qcom_check_hibern8() argument
189 err = ufshcd_dme_get(hba, in ufs_qcom_check_hibern8()
205 err = ufshcd_dme_get(hba, in ufs_qcom_check_hibern8()
211 dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", in ufs_qcom_check_hibern8()
215 dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", in ufs_qcom_check_hibern8()
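The ufs_qcom_check_hibern8() hits above show the driver reading TX_FSM_STATE through ufshcd_dme_get() and reporting either a read failure or an unexpected final state. A minimal user-space sketch of that bounded-poll shape, assuming the usual retry-until-settled structure; fake_dme_get() and the state values are stand-ins, not the kernel's:

#include <stdio.h>

enum { TX_FSM_HIBERN8 = 1 };            /* illustrative state value */

static int fake_dme_get(int *state)     /* models ufshcd_dme_get(hba, ..., &val) */
{
    static int calls;
    *state = (++calls >= 3) ? TX_FSM_HIBERN8 : 0;   /* settles on the 3rd read */
    return 0;                           /* 0 == success, matching the kernel API */
}

static int check_hibern8(void)
{
    int state = -1, err = 0;

    for (int retries = 10; retries > 0; retries--) {
        err = fake_dme_get(&state);
        /* mirror the listing: stop on a read error or once the FSM settles */
        if (err || state == TX_FSM_HIBERN8)
            break;
    }

    if (err)
        fprintf(stderr, "unable to get TX_FSM_STATE, err %d\n", err);
    else if (state != TX_FSM_HIBERN8)
        fprintf(stderr, "invalid TX_FSM_STATE = %d\n", state);
    return err ? err : (state == TX_FSM_HIBERN8 ? 0 : -1);
}

int main(void)
{
    return check_hibern8();
}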
224 ufshcd_rmwl(host->hba, QUNIPRO_SEL, in ufs_qcom_select_unipro_mode()
229 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0); in ufs_qcom_select_unipro_mode()
238 static int ufs_qcom_host_reset(struct ufs_hba *hba) in ufs_qcom_host_reset() argument
241 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_host_reset()
245 dev_warn(hba->dev, "%s: reset control not set\n", __func__); in ufs_qcom_host_reset()
249 reenable_intr = hba->is_irq_enabled; in ufs_qcom_host_reset()
250 disable_irq(hba->irq); in ufs_qcom_host_reset()
251 hba->is_irq_enabled = false; in ufs_qcom_host_reset()
255 dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", in ufs_qcom_host_reset()
269 dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", in ufs_qcom_host_reset()
275 enable_irq(hba->irq); in ufs_qcom_host_reset()
276 hba->is_irq_enabled = true; in ufs_qcom_host_reset()
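The ufs_qcom_host_reset() cluster shows a careful save-toggle-restore around the reset pulse: record hba->is_irq_enabled, mask the IRQ, assert then deassert core_reset, and re-enable the IRQ only if it was live before. A compilable analogue of that pattern, with the reset control and IRQ primitives stubbed out:

#include <stdbool.h>
#include <stdio.h>

static bool irq_enabled = true;          /* stands in for hba->is_irq_enabled */
static void disable_irq(void) { irq_enabled = false; puts("irq masked"); }
static void enable_irq(void)  { irq_enabled = true;  puts("irq unmasked"); }
static int reset_assert(void)   { puts("core_reset asserted");   return 0; }
static int reset_deassert(void) { puts("core_reset deasserted"); return 0; }

static int host_reset(void)
{
    /* remember whether the IRQ was live so we only re-enable what we took */
    bool reenable_intr = irq_enabled;
    int err;

    disable_irq();

    err = reset_assert();
    if (!err)
        err = reset_deassert();

    if (reenable_intr)
        enable_irq();
    return err;
}

int main(void) { return host_reset(); }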
282 static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba) in ufs_qcom_get_hs_gear() argument
284 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_get_hs_gear()
296 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0)); in ufs_qcom_get_hs_gear()
303 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) in ufs_qcom_power_up_sequence() argument
305 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_power_up_sequence()
310 ret = ufs_qcom_host_reset(hba); in ufs_qcom_power_up_sequence()
312 dev_warn(hba->dev, "%s: host reset returned %d\n", in ufs_qcom_power_up_sequence()
318 dev_err(hba->dev, "%s: phy init failed, ret = %d\n", in ufs_qcom_power_up_sequence()
328 dev_err(hba->dev, "%s: phy power on failed, ret = %d\n", in ufs_qcom_power_up_sequence()
351 static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) in ufs_qcom_enable_hw_clk_gating() argument
353 ufshcd_writel(hba, in ufs_qcom_enable_hw_clk_gating()
354 ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, in ufs_qcom_enable_hw_clk_gating()
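ufs_qcom_enable_hw_clk_gating() is a plain read-modify-write: OR the clock-gating enable bits into REG_UFS_CFG2 and write the result back. The same shape against an ordinary variable standing in for the MMIO register; the mask value below is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define CGC_EN_ALL 0x3ffU            /* illustrative mask */

static uint32_t reg_ufs_cfg2;        /* stands in for the memory-mapped register */

static uint32_t readl_reg(void)    { return reg_ufs_cfg2; }
static void writel_reg(uint32_t v) { reg_ufs_cfg2 = v; }

int main(void)
{
    /* mirrors: ufshcd_writel(hba, ufshcd_readl(hba, CFG2) | CGC_EN_ALL, CFG2) */
    writel_reg(readl_reg() | CGC_EN_ALL);
    printf("REG_UFS_CFG2 = 0x%x\n", (unsigned)reg_ufs_cfg2);
    return 0;
}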
361 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, in ufs_qcom_hce_enable_notify() argument
364 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_hce_enable_notify()
369 ufs_qcom_power_up_sequence(hba); in ufs_qcom_hce_enable_notify()
379 err = ufs_qcom_check_hibern8(hba); in ufs_qcom_hce_enable_notify()
380 ufs_qcom_enable_hw_clk_gating(hba); in ufs_qcom_hce_enable_notify()
384 dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); in ufs_qcom_hce_enable_notify()
394 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, in ufs_qcom_cfg_timers() argument
397 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_cfg_timers()
430 if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba)) in ufs_qcom_cfg_timers()
434 dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); in ufs_qcom_cfg_timers()
438 list_for_each_entry(clki, &hba->clk_list_head, list) { in ufs_qcom_cfg_timers()
448 if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { in ufs_qcom_cfg_timers()
449 ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); in ufs_qcom_cfg_timers()
469 dev_err(hba->dev, in ufs_qcom_cfg_timers()
478 dev_err(hba->dev, in ufs_qcom_cfg_timers()
486 dev_err(hba->dev, "%s: invalid rate = %d\n", in ufs_qcom_cfg_timers()
494 dev_err(hba->dev, in ufs_qcom_cfg_timers()
504 dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); in ufs_qcom_cfg_timers()
508 if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != in ufs_qcom_cfg_timers()
511 ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, in ufs_qcom_cfg_timers()
521 ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), in ufs_qcom_cfg_timers()
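The ufs_qcom_cfg_timers() hits show two derived values being programmed: REG_UFS_SYS1CLK_1US holds core clock cycles per microsecond, and the write at source line 521 scales the core clock rate to a 100 µs tick, (core_clk_rate / MSEC_PER_SEC) * 100. The arithmetic in isolation, assuming the usual cycles-per-µs = rate / 1 000 000 derivation for the first value:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL
#define MSEC_PER_SEC 1000UL

int main(void)
{
    unsigned long core_clk_rate = 150000000UL;   /* e.g. a 150 MHz core clock */

    /* value for REG_UFS_SYS1CLK_1US (assumed derivation) */
    unsigned long cycles_per_us = core_clk_rate / USEC_PER_SEC;

    /* value written at source line 521: cycles in 100 us */
    unsigned long cycles_per_100us = (core_clk_rate / MSEC_PER_SEC) * 100;

    printf("%lu cycles/us, %lu cycles/100us\n", cycles_per_us, cycles_per_100us);
    return 0;
}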
533 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, in ufs_qcom_link_startup_notify() argument
537 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_link_startup_notify()
541 if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, in ufs_qcom_link_startup_notify()
543 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_link_startup_notify()
553 err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, in ufs_qcom_link_startup_notify()
563 if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41) in ufs_qcom_link_startup_notify()
564 err = ufshcd_disable_host_tx_lcc(hba); in ufs_qcom_link_startup_notify()
574 static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted) in ufs_qcom_device_reset_ctrl() argument
576 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset_ctrl()
585 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, in ufs_qcom_suspend() argument
588 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_suspend()
594 if (ufs_qcom_is_link_off(hba)) { in ufs_qcom_suspend()
604 ufs_qcom_device_reset_ctrl(hba, true); in ufs_qcom_suspend()
606 } else if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_suspend()
613 static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_qcom_resume() argument
615 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_resume()
619 if (ufs_qcom_is_link_off(hba)) { in ufs_qcom_resume()
622 dev_err(hba->dev, "%s: failed PHY power on: %d\n", in ufs_qcom_resume()
631 } else if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_resume()
660 gating_wait = host->hba->dev_info.clk_gating_wait_us; in ufs_qcom_dev_ref_clk_ctrl()
696 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, in ufs_qcom_pwr_change_notify() argument
701 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_pwr_change_notify()
716 ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba); in ufs_qcom_pwr_change_notify()
722 dev_err(hba->dev, "%s: failed to determine capabilities\n", in ufs_qcom_pwr_change_notify()
731 if (!ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
736 ufshcd_dme_configure_adapt(hba, in ufs_qcom_pwr_change_notify()
742 if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, in ufs_qcom_pwr_change_notify()
745 dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", in ufs_qcom_pwr_change_notify()
760 if (ufshcd_is_hs_mode(&hba->pwr_info) && in ufs_qcom_pwr_change_notify()
772 static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) in ufs_qcom_quirk_host_pa_saveconfigtime() argument
777 err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), in ufs_qcom_quirk_host_pa_saveconfigtime()
783 return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1), in ufs_qcom_quirk_host_pa_saveconfigtime()
787 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) in ufs_qcom_apply_dev_quirks() argument
791 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) in ufs_qcom_apply_dev_quirks()
792 err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); in ufs_qcom_apply_dev_quirks()
794 if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC) in ufs_qcom_apply_dev_quirks()
795 hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE; in ufs_qcom_apply_dev_quirks()
800 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) in ufs_qcom_get_ufs_hci_version() argument
802 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_get_ufs_hci_version()
819 static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) in ufs_qcom_advertise_quirks() argument
821 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_advertise_quirks()
824 hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
829 hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; in ufs_qcom_advertise_quirks()
831 hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; in ufs_qcom_advertise_quirks()
835 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; in ufs_qcom_advertise_quirks()
839 hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS in ufs_qcom_advertise_quirks()
845 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; in ufs_qcom_advertise_quirks()
848 static void ufs_qcom_set_caps(struct ufs_hba *hba) in ufs_qcom_set_caps() argument
850 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_set_caps()
852 hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_qcom_set_caps()
853 hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; in ufs_qcom_set_caps()
854 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; in ufs_qcom_set_caps()
855 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_qcom_set_caps()
856 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_qcom_set_caps()
857 hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; in ufs_qcom_set_caps()
858 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_qcom_set_caps()
874 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, in ufs_qcom_setup_clocks() argument
877 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_setup_clocks()
890 if (!ufs_qcom_is_link_active(hba)) { in ufs_qcom_setup_clocks()
899 if (ufshcd_is_hs_mode(&hba->pwr_info)) in ufs_qcom_setup_clocks()
913 ufs_qcom_assert_reset(host->hba); in ufs_qcom_reset_assert()
924 ufs_qcom_deassert_reset(host->hba); in ufs_qcom_reset_deassert()
949 static int ufs_qcom_init(struct ufs_hba *hba) in ufs_qcom_init() argument
952 struct device *dev = hba->dev; in ufs_qcom_init()
965 host->hba = hba; in ufs_qcom_init()
966 ufshcd_set_variant(hba, host); in ufs_qcom_init()
969 host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); in ufs_qcom_init()
1002 ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, in ufs_qcom_init()
1010 host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; in ufs_qcom_init()
1025 list_for_each_entry(clki, &hba->clk_list_head, list) { in ufs_qcom_init()
1034 ufs_qcom_set_caps(hba); in ufs_qcom_init()
1035 ufs_qcom_advertise_quirks(hba); in ufs_qcom_init()
1041 ufs_qcom_setup_clocks(hba, true, POST_CHANGE); in ufs_qcom_init()
1043 if (hba->dev->id < MAX_UFS_QCOM_HOSTS) in ufs_qcom_init()
1044 ufs_qcom_hosts[hba->dev->id] = host; in ufs_qcom_init()
1062 ufshcd_set_variant(hba, NULL); in ufs_qcom_init()
1067 static void ufs_qcom_exit(struct ufs_hba *hba) in ufs_qcom_exit() argument
1069 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_exit()
1076 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() argument
1085 err = ufshcd_dme_get(hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div()
1097 return ufshcd_dme_set(hba, in ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div()
1102 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_up_pre_change() argument
1108 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_up_post_change() argument
1110 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_up_post_change()
1116 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); in ufs_qcom_clk_scale_up_post_change()
1119 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_down_pre_change() argument
1121 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_pre_change()
1128 err = ufshcd_dme_get(hba, in ufs_qcom_clk_scale_down_pre_change()
1136 err = ufshcd_dme_set(hba, in ufs_qcom_clk_scale_down_pre_change()
1144 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) in ufs_qcom_clk_scale_down_post_change() argument
1146 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_down_post_change()
1152 return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); in ufs_qcom_clk_scale_down_post_change()
1155 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, in ufs_qcom_clk_scale_notify() argument
1158 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_clk_scale_notify()
1163 err = ufshcd_uic_hibern8_enter(hba); in ufs_qcom_clk_scale_notify()
1167 err = ufs_qcom_clk_scale_up_pre_change(hba); in ufs_qcom_clk_scale_notify()
1169 err = ufs_qcom_clk_scale_down_pre_change(hba); in ufs_qcom_clk_scale_notify()
1171 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
1175 err = ufs_qcom_clk_scale_up_post_change(hba); in ufs_qcom_clk_scale_notify()
1177 err = ufs_qcom_clk_scale_down_post_change(hba); in ufs_qcom_clk_scale_notify()
1181 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
1185 ufs_qcom_cfg_timers(hba, in ufs_qcom_clk_scale_notify()
1190 ufshcd_uic_hibern8_exit(hba); in ufs_qcom_clk_scale_notify()
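ufs_qcom_clk_scale_notify() brackets the reconfiguration with a hibern8 enter/exit and dispatches along two axes: PRE/POST change and scale up/down. A sketch of that dispatch with the four helpers and the hibern8 calls stubbed; error-path details beyond what the listing shows are assumptions:

#include <stdio.h>

enum notify_stage { PRE_CHANGE, POST_CHANGE };

static int scale_up_pre(void)    { puts("up/pre");    return 0; }
static int scale_down_pre(void)  { puts("down/pre");  return 0; }
static int scale_up_post(void)   { puts("up/post");   return 0; }
static int scale_down_post(void) { puts("down/post"); return 0; }
static int hibern8_enter(void)   { puts("hibern8 enter"); return 0; }
static void hibern8_exit(void)   { puts("hibern8 exit"); }

static int clk_scale_notify(int scale_up, enum notify_stage stage)
{
    int err = 0;

    if (stage == PRE_CHANGE) {
        err = hibern8_enter();          /* quiesce the link first */
        if (err)
            return err;
        err = scale_up ? scale_up_pre() : scale_down_pre();
        if (err)
            hibern8_exit();             /* back out on failure */
    } else {
        err = scale_up ? scale_up_post() : scale_down_post();
        hibern8_exit();                 /* leave hibern8 after reprogramming */
    }
    return err;
}

int main(void)
{
    clk_scale_notify(1, PRE_CHANGE);
    clk_scale_notify(1, POST_CHANGE);
    return 0;
}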
1198 ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, in ufs_qcom_enable_test_bus()
1200 ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1); in ufs_qcom_enable_test_bus()
1213 dev_err(host->hba->dev, in ufs_qcom_testbus_cfg_is_ok()
1291 ufshcd_rmwl(host->hba, TEST_BUS_SEL, in ufs_qcom_testbus_config()
1294 ufshcd_rmwl(host->hba, mask, in ufs_qcom_testbus_config()
1307 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) in ufs_qcom_dump_dbg_regs() argument
1312 host = ufshcd_get_variant(hba); in ufs_qcom_dump_dbg_regs()
1314 ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, in ufs_qcom_dump_dbg_regs()
1318 ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC "); in ufs_qcom_dump_dbg_regs()
1320 reg = ufshcd_readl(hba, REG_UFS_CFG1); in ufs_qcom_dump_dbg_regs()
1322 ufshcd_writel(hba, reg, REG_UFS_CFG1); in ufs_qcom_dump_dbg_regs()
1325 ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM "); in ufs_qcom_dump_dbg_regs()
1328 ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM "); in ufs_qcom_dump_dbg_regs()
1331 ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM "); in ufs_qcom_dump_dbg_regs()
1334 ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1); in ufs_qcom_dump_dbg_regs()
1337 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM "); in ufs_qcom_dump_dbg_regs()
1340 ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM "); in ufs_qcom_dump_dbg_regs()
1343 ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC "); in ufs_qcom_dump_dbg_regs()
1346 ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC "); in ufs_qcom_dump_dbg_regs()
1349 ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC "); in ufs_qcom_dump_dbg_regs()
1352 ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT "); in ufs_qcom_dump_dbg_regs()
1355 ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT "); in ufs_qcom_dump_dbg_regs()
1364 static int ufs_qcom_device_reset(struct ufs_hba *hba) in ufs_qcom_device_reset() argument
1366 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_device_reset()
1376 ufs_qcom_device_reset_ctrl(hba, true); in ufs_qcom_device_reset()
1379 ufs_qcom_device_reset_ctrl(hba, false); in ufs_qcom_device_reset()
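ufs_qcom_device_reset() pulses the device reset line: assert at source line 1376, hold briefly, deassert at 1379. The pulse in miniature; the hold durations are elided from the listing, so none are shown here:

#include <stdio.h>

static void device_reset_ctrl(int asserted)
{
    printf("RST_n %s\n", asserted ? "asserted" : "deasserted");
}

int main(void)
{
    device_reset_ctrl(1);   /* assert the reset line */
    /* the driver holds the line briefly between the two calls; the exact
     * delay is not visible in the listing, so no value is assumed */
    device_reset_ctrl(0);   /* deassert; the device then re-initializes */
    return 0;
}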
1386 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, in ufs_qcom_config_scaling_param() argument
1395 static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, in ufs_qcom_config_scaling_param() argument
1402 static void ufs_qcom_reinit_notify(struct ufs_hba *hba) in ufs_qcom_reinit_notify() argument
1404 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_reinit_notify()
1425 static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba) in ufs_qcom_mcq_config_resource() argument
1427 struct platform_device *pdev = to_platform_device(hba->dev); in ufs_qcom_mcq_config_resource()
1432 memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info)); in ufs_qcom_mcq_config_resource()
1435 res = &hba->res[i]; in ufs_qcom_mcq_config_resource()
1440 dev_info(hba->dev, "Resource %s not provided\n", res->name); in ufs_qcom_mcq_config_resource()
1446 res->base = hba->mmio_base; in ufs_qcom_mcq_config_resource()
1450 res->base = devm_ioremap_resource(hba->dev, res->resource); in ufs_qcom_mcq_config_resource()
1452 dev_err(hba->dev, "Failed to map res %s, err=%d\n", in ufs_qcom_mcq_config_resource()
1461 res = &hba->res[RES_MCQ]; in ufs_qcom_mcq_config_resource()
1467 res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL); in ufs_qcom_mcq_config_resource()
1472 MCQ_SQATTR_OFFSET(hba->mcq_capabilities); in ufs_qcom_mcq_config_resource()
1473 res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1; in ufs_qcom_mcq_config_resource()
1479 dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n", in ufs_qcom_mcq_config_resource()
1484 res->base = devm_ioremap_resource(hba->dev, res_mcq); in ufs_qcom_mcq_config_resource()
1486 dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n", in ufs_qcom_mcq_config_resource()
1493 hba->mcq_base = res->base; in ufs_qcom_mcq_config_resource()
1501 static int ufs_qcom_op_runtime_config(struct ufs_hba *hba) in ufs_qcom_op_runtime_config() argument
1507 mem_res = &hba->res[RES_UFS]; in ufs_qcom_op_runtime_config()
1508 sqdao_res = &hba->res[RES_MCQ_SQD]; in ufs_qcom_op_runtime_config()
1514 opr = &hba->mcq_opr[i]; in ufs_qcom_op_runtime_config()
1524 static int ufs_qcom_get_hba_mac(struct ufs_hba *hba) in ufs_qcom_get_hba_mac() argument
1530 static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba, in ufs_qcom_get_outstanding_cqs() argument
1533 struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS]; in ufs_qcom_get_outstanding_cqs()
1546 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_qcom_write_msi_msg() local
1548 ufshcd_mcq_config_esi(hba, msg); in ufs_qcom_write_msi_msg()
1553 struct ufs_hba *hba = __hba; in ufs_qcom_mcq_esi_handler() local
1554 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_mcq_esi_handler()
1556 struct ufs_hw_queue *hwq = &hba->uhq[id]; in ufs_qcom_mcq_esi_handler()
1558 ufshcd_mcq_write_cqis(hba, 0x1, id); in ufs_qcom_mcq_esi_handler()
1559 ufshcd_mcq_poll_cqe_nolock(hba, hwq); in ufs_qcom_mcq_esi_handler()
1564 static int ufs_qcom_config_esi(struct ufs_hba *hba) in ufs_qcom_config_esi() argument
1566 struct ufs_qcom_host *host = ufshcd_get_variant(hba); in ufs_qcom_config_esi()
1580 nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; in ufs_qcom_config_esi()
1581 ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs, in ufs_qcom_config_esi()
1586 msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { in ufs_qcom_config_esi()
1590 ret = devm_request_irq(hba->dev, desc->irq, in ufs_qcom_config_esi()
1592 IRQF_SHARED, "qcom-mcq-esi", hba); in ufs_qcom_config_esi()
1594 dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", in ufs_qcom_config_esi()
1603 msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { in ufs_qcom_config_esi()
1606 devm_free_irq(hba->dev, desc->irq, hba); in ufs_qcom_config_esi()
1608 platform_msi_domain_free_irqs(hba->dev); in ufs_qcom_config_esi()
1612 ufshcd_writel(hba, in ufs_qcom_config_esi()
1613 ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000, in ufs_qcom_config_esi()
1616 ufshcd_mcq_enable_esi(hba); in ufs_qcom_config_esi()
1622 dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret); in ufs_qcom_config_esi()
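The ufs_qcom_config_esi() hits (source lines 1586-1608) show the classic partial-failure rollback for per-queue ESI vectors: request an IRQ per MSI descriptor, and if one request fails, walk the descriptors again freeing only those already requested before releasing the MSI domain allocation. The rollback shape in a self-contained toy, with stubbed request/free calls:

#include <stdio.h>

#define NR_IRQS 4

static int request_irq_stub(int i)
{
    if (i == 2)                          /* simulate the third request failing */
        return -1;
    printf("irq %d requested\n", i);
    return 0;
}

static void free_irq_stub(int i) { printf("irq %d freed\n", i); }

int main(void)
{
    int i, failed = -1;

    for (i = 0; i < NR_IRQS; i++) {
        if (request_irq_stub(i)) {
            failed = i;
            break;
        }
    }

    if (failed >= 0) {
        /* free only the vectors that were successfully requested */
        for (i = 0; i < failed; i++)
            free_irq_stub(i);
        puts("msi domain freed");        /* platform_msi_domain_free_irqs() analogue */
        return 1;
    }
    return 0;
}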
1688 struct ufs_hba *hba = platform_get_drvdata(pdev); in ufs_qcom_remove() local
1691 ufshcd_remove(hba); in ufs_qcom_remove()
1692 platform_msi_domain_free_irqs(hba->dev); in ufs_qcom_remove()
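Nearly every cluster above opens the same way: the callback receives the generic struct ufs_hba and immediately recovers the Qualcomm-private struct ufs_qcom_host with ufshcd_get_variant(), which ufs_qcom_init() paired with ufshcd_set_variant() and a host->hba back-pointer. A self-contained user-space analogue of that variant-pointer pattern; every name here is a stand-in for the kernel one named in the comment:

#include <stdio.h>
#include <stdlib.h>

struct hba {                 /* stands in for struct ufs_hba */
    void *priv;              /* what ufshcd_set_variant() stores into */
};

struct qcom_host {           /* stands in for struct ufs_qcom_host */
    struct hba *hba;         /* back-pointer: host->hba = hba in ufs_qcom_init() */
};

static void set_variant(struct hba *hba, void *v) { hba->priv = v; }
static void *get_variant(struct hba *hba)         { return hba->priv; }

static int qcom_init(struct hba *hba)
{
    struct qcom_host *host = calloc(1, sizeof(*host));

    if (!host)
        return -1;
    host->hba = hba;         /* lets helpers reach hba->dev from host alone */
    set_variant(hba, host);
    return 0;
}

static void qcom_callback(struct hba *hba)
{
    /* the opening line of almost every vops callback in the listing */
    struct qcom_host *host = get_variant(hba);

    printf("recovered host %p from hba %p\n", (void *)host, (void *)hba);
}

int main(void)
{
    struct hba hba = { 0 };

    if (qcom_init(&hba))
        return 1;
    qcom_callback(&hba);
    free(get_variant(&hba));
    return 0;
}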