author     James Bottomley <JBottomley@Odin.com>    2015-11-12 07:06:18 -0500
committer  James Bottomley <JBottomley@Odin.com>    2015-11-12 07:06:18 -0500
commit     febdfbd2137a5727f70dfbf920105c07e6c2a21e (patch)
tree       9483a5493ad3e08626e1f53ded594f88a6f4e710 /drivers/scsi/ufs
parent     0da39687a15403251bdfd1c6fb18025c0607326b (diff)
parent     2c5d16d6a9e7218e57b716e4fd9d77c776d21471 (diff)
Merge tag '4.4-scsi-mkp' into misc
SCSI queue for 4.4.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/Kconfig             2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c        905
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h         68
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c    98
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.h    41
-rw-r--r--  drivers/scsi/ufs/ufshcd.c          122
-rw-r--r--  drivers/scsi/ufs/ufshcd.h          149
7 files changed, 1048 insertions, 337 deletions
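
The bulk of this merge, visible in the diff below, converts the Qualcomm UFS glue from a built-in-only option (bool SCSI_UFS_QCOM) into a standalone platform driver that can also be built as a module (tristate), and exports a generic ufshcd_pltfrm_init() plus PM/shutdown helpers from ufshcd-pltfrm.c for variant drivers to reuse. As a hedged illustration only, the sketch below shows how a hypothetical variant ("acme" is an invented name, not part of this merge) would be wired against the new API, mirroring ufs_qcom_probe()/ufs_qcom_remove() further down:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"

/* "acme" is a hypothetical variant used only for illustration */
static struct ufs_hba_variant_ops ufs_acme_vops = {
	.name = "acme",
	/* optional hooks: .init, .exit, .setup_clocks, .suspend, .resume, ... */
};

static int ufs_acme_probe(struct platform_device *pdev)
{
	/* generic host allocation, clock/regulator parsing and ufshcd_init() */
	return ufshcd_pltfrm_init(pdev, &ufs_acme_vops);
}

static int ufs_acme_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	ufshcd_remove(hba);
	return 0;
}

/* PM entry points are the helpers this merge exports from ufshcd-pltfrm.c */
static const struct dev_pm_ops ufs_acme_pm_ops = {
	.suspend	 = ufshcd_pltfrm_suspend,
	.resume		 = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_acme_driver = {
	.probe		= ufs_acme_probe,
	.remove		= ufs_acme_remove,
	.shutdown	= ufshcd_pltfrm_shutdown,
	.driver		= {
		.name	= "ufshcd-acme",
		.pm	= &ufs_acme_pm_ops,
		/* an in-tree driver would also set .of_match_table */
	},
};
module_platform_driver(ufs_acme_driver);

MODULE_LICENSE("GPL v2");

With SCSI_UFS_QCOM switched to tristate in the Kconfig hunk, the in-tree equivalent of this sketch can be built as ufs-qcom.ko as well as built in.
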
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e94538362536..5f4530744e0a 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -72,7 +72,7 @@ config SCSI_UFSHCD_PLATFORM If unsure, say N. config SCSI_UFS_QCOM - bool "QCOM specific hooks to UFS controller platform driver" + tristate "QCOM specific hooks to UFS controller platform driver" depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM select PHY_QCOM_UFS help diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 4cdffa46d401..4f38d008bfb4 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -19,16 +19,44 @@ #include <linux/phy/phy-qcom-ufs.h> #include "ufshcd.h" +#include "ufshcd-pltfrm.h" #include "unipro.h" #include "ufs-qcom.h" #include "ufshci.h" +#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \ + (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + +enum { + TSTBUS_UAWM, + TSTBUS_UARM, + TSTBUS_TXUC, + TSTBUS_RXUC, + TSTBUS_DFC, + TSTBUS_TRLUT, + TSTBUS_TMRLUT, + TSTBUS_OCSC, + TSTBUS_UTP_HCI, + TSTBUS_COMBINED, + TSTBUS_WRAPPER, + TSTBUS_UNIPRO, + TSTBUS_MAX, +}; static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; -static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result); -static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, - const char *speed_mode); static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles); + +static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, + char *prefix) +{ + print_hex_dump(KERN_ERR, prefix, + len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, + 16, 4, (void __force *)hba->mmio_base + offset, + len * 4, false); +} static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) { @@ -149,13 +177,14 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", &host->tx_l1_sync_clk); + out: return err; } static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; u32 tx_lanes; int err = 0; @@ -181,7 +210,9 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba) do { err = ufshcd_dme_get(hba, - UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); if (err || tx_fsm_val == TX_FSM_HIBERN8) break; @@ -195,7 +226,9 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba) */ if (time_after(jiffies, timeout)) err = ufshcd_dme_get(hba, - UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); if (err) { dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", @@ -209,9 +242,18 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba) return err; } +static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) +{ + ufshcd_rmwl(host->hba, QUNIPRO_SEL, + ufs_qcom_cap_qunipro(host) ? 
QUNIPRO_SEL : 0, + REG_UFS_CFG1); + /* make sure above configuration is applied before we return */ + mb(); +} + static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; int ret = 0; bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) @@ -223,9 +265,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) usleep_range(1000, 1100); ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); + if (ret) { - dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", - __func__, ret); + dev_err(hba->dev, + "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n", + __func__, ret); goto out; } @@ -246,9 +290,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) ret = ufs_qcom_phy_is_pcs_ready(phy); if (ret) - dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", + dev_err(hba->dev, + "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", __func__, ret); + ufs_qcom_select_unipro_mode(host); + out: return ret; } @@ -271,9 +318,10 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) mb(); } -static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status) +static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err = 0; switch (status) { @@ -301,13 +349,13 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status) } /** - * Returns non-zero for success (which rate of core_clk) and 0 - * in case of a failure + * Returns zero for success and non-zero in case of a failure */ -static unsigned long -ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, + u32 hs, u32 rate, bool update_link_startup_timer) { - struct ufs_qcom_host *host = hba->priv; + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_clk_info *clki; u32 core_clk_period_in_ns; u32 tx_clk_cycles_per_us = 0; @@ -324,11 +372,13 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) static u32 hs_fr_table_rA[][2] = { {UFS_HS_G1, 0x1F}, {UFS_HS_G2, 0x3e}, + {UFS_HS_G3, 0x7D}, }; static u32 hs_fr_table_rB[][2] = { {UFS_HS_G1, 0x24}, {UFS_HS_G2, 0x49}, + {UFS_HS_G3, 0x92}, }; /* @@ -356,7 +406,17 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) core_clk_rate = DEFAULT_CLK_RATE_HZ; core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; - ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); + if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { + ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); + /* + * make sure above write gets applied before we return from + * this function. 
+ */ + mb(); + } + + if (ufs_qcom_cap_qunipro(host)) + goto out; core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; @@ -406,35 +466,59 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) goto out_error; } - /* this register 2 fields shall be written at once */ - ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, - REG_UFS_TX_SYMBOL_CLK_NS_US); + if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) != + (core_clk_period_in_ns | tx_clk_cycles_per_us)) { + /* this register 2 fields shall be written at once */ + ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, + REG_UFS_TX_SYMBOL_CLK_NS_US); + /* + * make sure above write gets applied before we return from + * this function. + */ + mb(); + } + + if (update_link_startup_timer) { + ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100), + REG_UFS_PA_LINK_STARTUP_TIMER); + /* + * make sure that this configuration is applied before + * we return + */ + mb(); + } goto out; out_error: - core_clk_rate = 0; + ret = -EINVAL; out: - return core_clk_rate; + return ret; } -static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status) +static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) { - unsigned long core_clk_rate = 0; - u32 core_clk_cycles_per_100ms; + int err = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); switch (status) { case PRE_CHANGE: - core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1, - SLOWAUTO_MODE, 0); - if (!core_clk_rate) { + if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, + 0, true)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); - return -EINVAL; + err = -EINVAL; + goto out; } - core_clk_cycles_per_100ms = - (core_clk_rate / MSEC_PER_SEC) * 100; - ufshcd_writel(hba, core_clk_cycles_per_100ms, - REG_UFS_PA_LINK_STARTUP_TIMER); + + if (ufs_qcom_cap_qunipro(host)) + /* + * set unipro core clock cycles to 150 & clear clock + * divider + */ + err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, + 150); + break; case POST_CHANGE: ufs_qcom_link_startup_post_change(hba); @@ -443,12 +527,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status) break; } - return 0; +out: + return err; } static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; int ret = 0; @@ -470,8 +555,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * If UniPro link is not active, PHY ref_clk, main PHY analog power * rail and low noise analog power rail for PLL can be switched off. 
*/ - if (!ufs_qcom_is_link_active(hba)) + if (!ufs_qcom_is_link_active(hba)) { + ufs_qcom_disable_lane_clks(host); phy_power_off(phy); + } out: return ret; @@ -479,7 +566,7 @@ out: static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; int err; @@ -490,6 +577,10 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; } + err = ufs_qcom_enable_lane_clks(host); + if (err) + goto out; + hba->is_sys_suspended = false; out: @@ -594,6 +685,81 @@ static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, return 0; } +#ifdef CONFIG_MSM_BUS_SCALING +static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, + const char *speed_mode) +{ + struct device *dev = host->hba->dev; + struct device_node *np = dev->of_node; + int err; + const char *key = "qcom,bus-vector-names"; + + if (!speed_mode) { + err = -EINVAL; + goto out; + } + + if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) + err = of_property_match_string(np, key, "MAX"); + else + err = of_property_match_string(np, key, speed_mode); + +out: + if (err < 0) + dev_err(dev, "%s: Invalid %s mode %d\n", + __func__, speed_mode, err); + return err; +} + +static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) +{ + int gear = max_t(u32, p->gear_rx, p->gear_tx); + int lanes = max_t(u32, p->lane_rx, p->lane_tx); + int pwr; + + /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ + if (!gear) + gear = 1; + + if (!lanes) + lanes = 1; + + if (!p->pwr_rx && !p->pwr_tx) { + pwr = SLOWAUTO_MODE; + snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); + } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || + p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { + pwr = FAST_MODE; + snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", + p->hs_rate == PA_HS_MODE_B ? 
"B" : "A", gear, lanes); + } else { + pwr = SLOW_MODE; + snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", + "PWM", gear, lanes); + } +} + +static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) +{ + int err = 0; + + if (vote != host->bus_vote.curr_vote) { + err = msm_bus_scale_client_update_request( + host->bus_vote.client_handle, vote); + if (err) { + dev_err(host->hba->dev, + "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n", + __func__, host->bus_vote.client_handle, + vote, err); + goto out; + } + + host->bus_vote.curr_vote = vote; + } +out: + return err; +} + static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) { int vote; @@ -615,13 +781,137 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) return err; } +static ssize_t +show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return snprintf(buf, PAGE_SIZE, "%u\n", + host->bus_vote.is_max_bw_needed); +} + +static ssize_t +store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + uint32_t value; + + if (!kstrtou32(buf, 0, &value)) { + host->bus_vote.is_max_bw_needed = !!value; + ufs_qcom_update_bus_bw_vote(host); + } + + return count; +} + +static int ufs_qcom_bus_register(struct ufs_qcom_host *host) +{ + int err; + struct msm_bus_scale_pdata *bus_pdata; + struct device *dev = host->hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct device_node *np = dev->of_node; + + bus_pdata = msm_bus_cl_get_pdata(pdev); + if (!bus_pdata) { + dev_err(dev, "%s: failed to get bus vectors\n", __func__); + err = -ENODATA; + goto out; + } + + err = of_property_count_strings(np, "qcom,bus-vector-names"); + if (err < 0 || err != bus_pdata->num_usecases) { + dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", + __func__, err); + goto out; + } + + host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata); + if (!host->bus_vote.client_handle) { + dev_err(dev, "%s: msm_bus_scale_register_client failed\n", + __func__); + err = -EFAULT; + goto out; + } + + /* cache the vote index for minimum and maximum bandwidth */ + host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); + host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); + + host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; + host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; + sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); + host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; + host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; + err = device_create_file(dev, &host->bus_vote.max_bus_bw); +out: + return err; +} +#else /* CONFIG_MSM_BUS_SCALING */ +static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) +{ + return 0; +} + +static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) +{ + return 0; +} + +static int ufs_qcom_bus_register(struct ufs_qcom_host *host) +{ + return 0; +} +#endif /* CONFIG_MSM_BUS_SCALING */ + +static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable) +{ + if (host->dev_ref_clk_ctrl_mmio && + (enable ^ host->is_dev_ref_clk_enabled)) { + u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio); + + if (enable) + temp |= 
host->dev_ref_clk_en_mask; + else + temp &= ~host->dev_ref_clk_en_mask; + + /* + * If we are here to disable this clock it might be immediately + * after entering into hibern8 in which case we need to make + * sure that device ref_clk is active at least 1us after the + * hibern8 enter. + */ + if (!enable) + udelay(1); + + writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio); + + /* ensure that ref_clk is enabled/disabled before we return */ + wmb(); + + /* + * If we call hibern8 exit after this, we need to make sure that + * device ref_clk is stable for at least 1us before the hibern8 + * exit command. + */ + if (enable) + udelay(1); + + host->is_dev_ref_clk_enabled = enable; + } +} + static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, - bool status, + enum ufs_notify_change_status status, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { u32 val; - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy = host->generic_phy; struct ufs_qcom_dev_params ufs_qcom_cap; int ret = 0; @@ -649,6 +939,20 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, ufs_qcom_cap.desired_working_mode = UFS_QCOM_LIMIT_DESIRED_MODE; + if (host->hw_ver.major == 0x1) { + /* + * HS-G3 operations may not reliably work on legacy QCOM + * UFS host controller hardware even though capability + * exchange during link startup phase may end up + * negotiating maximum supported gear as G3. + * Hence downgrade the maximum supported gear to HS-G2. + */ + if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_tx_gear = UFS_HS_G2; + if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2) + ufs_qcom_cap.hs_rx_gear = UFS_HS_G2; + } + ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap, dev_max_params, dev_req_params); @@ -660,9 +964,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, break; case POST_CHANGE: - if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, + if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, dev_req_params->pwr_rx, - dev_req_params->hs_rate)) { + dev_req_params->hs_rate, false)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); /* @@ -696,7 +1000,7 @@ out: static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); if (host->hw_ver.major == 0x1) return UFSHCI_VERSION_11; @@ -715,7 +1019,7 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) */ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); if (host->hw_ver.major == 0x01) { hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS @@ -724,10 +1028,11 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001) hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR; + + hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; } if (host->hw_ver.major >= 0x2) { - hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC; hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION; if (!ufs_qcom_cap_qunipro(host)) @@ -740,79 +1045,29 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) static void ufs_qcom_set_caps(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); - if (host->hw_ver.major >= 0x2) - host->caps = UFS_QCOM_CAP_QUNIPRO; -} - -static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, - const char *speed_mode) -{ - struct 
device *dev = host->hba->dev; - struct device_node *np = dev->of_node; - int err; - const char *key = "qcom,bus-vector-names"; - - if (!speed_mode) { - err = -EINVAL; - goto out; - } - - if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) - err = of_property_match_string(np, key, "MAX"); - else - err = of_property_match_string(np, key, speed_mode); - -out: - if (err < 0) - dev_err(dev, "%s: Invalid %s mode %d\n", - __func__, speed_mode, err); - return err; -} - -static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) -{ - int err = 0; - - if (vote != host->bus_vote.curr_vote) - host->bus_vote.curr_vote = vote; - - return err; -} - -static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) -{ - int gear = max_t(u32, p->gear_rx, p->gear_tx); - int lanes = max_t(u32, p->lane_rx, p->lane_tx); - int pwr; - - /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ - if (!gear) - gear = 1; - - if (!lanes) - lanes = 1; + hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; + hba->caps |= UFSHCD_CAP_CLK_SCALING; + hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; - if (!p->pwr_rx && !p->pwr_tx) { - pwr = SLOWAUTO_MODE; - snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); - } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || - p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { - pwr = FAST_MODE; - snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", - p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes); - } else { - pwr = SLOW_MODE; - snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", - "PWM", gear, lanes); + if (host->hw_ver.major >= 0x2) { + host->caps = UFS_QCOM_CAP_QUNIPRO | + UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE; } } +/** + * ufs_qcom_setup_clocks - enables/disable clocks + * @hba: host controller instance + * @on: If true, enable clocks else disable them. + * + * Returns 0 on success, non-zero on failure. 
+ */ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) { - struct ufs_qcom_host *host = hba->priv; - int err = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; int vote = 0; /* @@ -835,20 +1090,18 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) ufs_qcom_phy_disable_iface_clk(host->generic_phy); goto out; } - /* enable the device ref clock */ - ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy); vote = host->bus_vote.saved_vote; if (vote == host->bus_vote.min_bw_vote) ufs_qcom_update_bus_bw_vote(host); + } else { + /* M-PHY RMMI interface clocks can be turned off */ ufs_qcom_phy_disable_iface_clk(host->generic_phy); - if (!ufs_qcom_is_link_active(hba)) { - /* turn off UFS local PHY ref_clk */ - ufs_qcom_phy_disable_ref_clk(host->generic_phy); + if (!ufs_qcom_is_link_active(hba)) /* disable device ref_clk */ - ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy); - } + ufs_qcom_dev_ref_clk_ctrl(host, false); + vote = host->bus_vote.min_bw_vote; } @@ -861,68 +1114,17 @@ out: return err; } -static ssize_t -show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct ufs_hba *hba = dev_get_drvdata(dev); - struct ufs_qcom_host *host = hba->priv; - - return snprintf(buf, PAGE_SIZE, "%u\n", - host->bus_vote.is_max_bw_needed); -} - -static ssize_t -store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ufs_hba *hba = dev_get_drvdata(dev); - struct ufs_qcom_host *host = hba->priv; - uint32_t value; - - if (!kstrtou32(buf, 0, &value)) { - host->bus_vote.is_max_bw_needed = !!value; - ufs_qcom_update_bus_bw_vote(host); - } - - return count; -} - -static int ufs_qcom_bus_register(struct ufs_qcom_host *host) -{ - int err; - struct device *dev = host->hba->dev; - struct device_node *np = dev->of_node; - - err = of_property_count_strings(np, "qcom,bus-vector-names"); - if (err < 0 ) { - dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", - __func__, err); - goto out; - } - - /* cache the vote index for minimum and maximum bandwidth */ - host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); - host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); - - host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; - host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; - sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); - host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; - host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; - err = device_create_file(dev, &host->bus_vote.max_bus_bw); -out: - return err; -} - #define ANDROID_BOOT_DEV_MAX 30 static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; -static int get_android_boot_dev(char *str) + +#ifndef MODULE +static int __init get_android_boot_dev(char *str) { strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX); return 1; } __setup("androidboot.bootdevice=", get_android_boot_dev); +#endif /** * ufs_qcom_init - bind phy with controller @@ -938,7 +1140,9 @@ static int ufs_qcom_init(struct ufs_hba *hba) { int err; struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); struct ufs_qcom_host *host; + struct resource *res; if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) return -ENODEV; @@ -950,9 +1154,15 @@ static int ufs_qcom_init(struct ufs_hba *hba) goto out; } + /* Make a two way bind between the qcom host and the hba */ host->hba = hba; - hba->priv = (void *)host; + ufshcd_set_variant(hba, host); 
+ /* + * voting/devoting device ref_clk source is time consuming hence + * skip devoting it during aggressive clock gating. This clock + * will still be gated off during runtime suspend. + */ host->generic_phy = devm_phy_get(dev, "ufsphy"); if (IS_ERR(host->generic_phy)) { @@ -968,6 +1178,30 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_get_controller_revision(hba, &host->hw_ver.major, &host->hw_ver.minor, &host->hw_ver.step); + /* + * for newer controllers, device reference clock control bit has + * moved inside UFS controller register address space itself. + */ + if (host->hw_ver.major >= 0x02) { + host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1; + host->dev_ref_clk_en_mask = BIT(26); + } else { + /* "dev_ref_clk_ctrl_mem" is optional resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + host->dev_ref_clk_ctrl_mmio = + devm_ioremap_resource(dev, res); + if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) { + dev_warn(dev, + "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n", + __func__, + PTR_ERR(host->dev_ref_clk_ctrl_mmio)); + host->dev_ref_clk_ctrl_mmio = NULL; + } + host->dev_ref_clk_en_mask = BIT(5); + } + } + /* update phy revision information before calling phy_init() */ ufs_qcom_phy_save_controller_version(host->generic_phy, host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step); @@ -984,14 +1218,20 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); - hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING; - hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; - ufs_qcom_setup_clocks(hba, true); if (hba->dev->id < MAX_UFS_QCOM_HOSTS) ufs_qcom_hosts[hba->dev->id] = host; + host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN; + ufs_qcom_get_default_testbus_cfg(host); + err = ufs_qcom_testbus_config(host); + if (err) { + dev_warn(dev, "%s: failed to configure the testbus %d\n", + __func__, err); + err = 0; + } + goto out; out_disable_phy: @@ -1000,40 +1240,266 @@ out_unregister_bus: phy_exit(host->generic_phy); out_host_free: devm_kfree(dev, host); - hba->priv = NULL; + ufshcd_set_variant(hba, NULL); out: return err; } static void ufs_qcom_exit(struct ufs_hba *hba) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); ufs_qcom_disable_lane_clks(host); phy_power_off(host->generic_phy); } -static -void ufs_qcom_clk_scale_notify(struct ufs_hba *hba) +static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, + u32 clk_cycles) +{ + int err; + u32 core_clk_ctrl_reg; + + if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK) + return -EINVAL; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + if (err) + goto out; + + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK; + core_clk_ctrl_reg |= clk_cycles; + + /* Clear CORE_CLK_DIV_EN */ + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); +out: + return err; +} + +static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) +{ + /* nothing to do as of now */ + return 0; +} + +static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 150 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150); +} + +static int 
ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; + u32 core_clk_ctrl_reg; + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + &core_clk_ctrl_reg); + + /* make sure CORE_CLK_DIV_EN is cleared */ + if (!err && + (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) { + core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT; + err = ufshcd_dme_set(hba, + UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), + core_clk_ctrl_reg); + } + + return err; +} + +static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!ufs_qcom_cap_qunipro(host)) + return 0; + + /* set unipro core clock cycles to 75 and clear clock divider */ + return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75); +} + +static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, + bool scale_up, enum ufs_notify_change_status status) { - struct ufs_qcom_host *host = hba->priv; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; + int err = 0; - if (!dev_req_params) - return; + if (status == PRE_CHANGE) { + if (scale_up) + err = ufs_qcom_clk_scale_up_pre_change(hba); + else + err = ufs_qcom_clk_scale_down_pre_change(hba); + } else { + if (scale_up) + err = ufs_qcom_clk_scale_up_post_change(hba); + else + err = ufs_qcom_clk_scale_down_post_change(hba); - ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, - dev_req_params->pwr_rx, - dev_req_params->hs_rate); + if (err || !dev_req_params) + goto out; + + ufs_qcom_cfg_timers(hba, + dev_req_params->gear_rx, + dev_req_params->pwr_rx, + dev_req_params->hs_rate, + false); + ufs_qcom_update_bus_bw_vote(host); + } + +out: + return err; } +static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host) +{ + /* provide a legal default configuration */ + host->testbus.select_major = TSTBUS_UAWM; + host->testbus.select_minor = 1; +} + +static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host) +{ + if (host->testbus.select_major >= TSTBUS_MAX) { + dev_err(host->hba->dev, + "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n", + __func__, host->testbus.select_major); + return false; + } + + /* + * Not performing check for each individual select_major + * mappings of select_minor, since there is no harm in + * configuring a non-existent select_minor + */ + if (host->testbus.select_minor > 0x1F) { + dev_err(host->hba->dev, + "%s: 0x%05X is not a legal testbus option\n", + __func__, host->testbus.select_minor); + return false; + } + + return true; +} + +int ufs_qcom_testbus_config(struct ufs_qcom_host *host) +{ + int reg; + int offset; + u32 mask = TEST_BUS_SUB_SEL_MASK; + + if (!host) + return -EINVAL; + + if (!ufs_qcom_testbus_cfg_is_ok(host)) + return -EPERM; + + switch (host->testbus.select_major) { + case TSTBUS_UAWM: + reg = UFS_TEST_BUS_CTRL_0; + offset = 24; + break; + case TSTBUS_UARM: + reg = UFS_TEST_BUS_CTRL_0; + offset = 16; + break; + case TSTBUS_TXUC: + reg = UFS_TEST_BUS_CTRL_0; + offset = 8; + break; + case TSTBUS_RXUC: + reg = UFS_TEST_BUS_CTRL_0; + offset = 0; + break; + case TSTBUS_DFC: + reg = UFS_TEST_BUS_CTRL_1; + offset = 24; + break; + case TSTBUS_TRLUT: + reg = UFS_TEST_BUS_CTRL_1; + offset = 16; + break; + case TSTBUS_TMRLUT: + reg = UFS_TEST_BUS_CTRL_1; + offset = 8; + break; + case TSTBUS_OCSC: + reg = UFS_TEST_BUS_CTRL_1; + offset = 0; + break; + case TSTBUS_WRAPPER: + 
reg = UFS_TEST_BUS_CTRL_2; + offset = 16; + break; + case TSTBUS_COMBINED: + reg = UFS_TEST_BUS_CTRL_2; + offset = 8; + break; + case TSTBUS_UTP_HCI: + reg = UFS_TEST_BUS_CTRL_2; + offset = 0; + break; + case TSTBUS_UNIPRO: + reg = UFS_UNIPRO_CFG; + offset = 1; + break; + /* + * No need for a default case, since + * ufs_qcom_testbus_cfg_is_ok() checks that the configuration + * is legal + */ + } + mask <<= offset; + + pm_runtime_get_sync(host->hba->dev); + ufshcd_hold(host->hba, false); + ufshcd_rmwl(host->hba, TEST_BUS_SEL, + (u32)host->testbus.select_major << 19, + REG_UFS_CFG1); + ufshcd_rmwl(host->hba, mask, + (u32)host->testbus.select_minor << offset, + reg); + ufshcd_release(host->hba); + pm_runtime_put_sync(host->hba->dev); + + return 0; +} + +static void ufs_qcom_testbus_read(struct ufs_hba *hba) +{ + ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); +} + +static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) +{ + ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, + "HCI Vendor Specific Registers "); + + ufs_qcom_testbus_read(hba); +} /** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * * The variant operations configure the necessary controller and PHY * handshake during initialization. */ -static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { +static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .name = "qcom", .init = ufs_qcom_init, .exit = ufs_qcom_exit, @@ -1045,5 +1511,66 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .pwr_change_notify = ufs_qcom_pwr_change_notify, .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, + .dbg_register_dump = ufs_qcom_dump_dbg_regs, +}; + +/** + * ufs_qcom_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Return zero for success and non-zero for failure + */ +static int ufs_qcom_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + + /* Perform generic probe */ + err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +/** + * ufs_qcom_remove - set driver_data of the device to NULL + * @pdev: pointer to platform device handle + * + * Always return 0 + */ +static int ufs_qcom_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static const struct of_device_id ufs_qcom_of_match[] = { + { .compatible = "qcom,ufshc"}, + {}, +}; + +static const struct dev_pm_ops ufs_qcom_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, + .runtime_suspend = ufshcd_pltfrm_runtime_suspend, + .runtime_resume = ufshcd_pltfrm_runtime_resume, + .runtime_idle = ufshcd_pltfrm_runtime_idle, }; -EXPORT_SYMBOL(ufs_hba_qcom_vops); + +static struct platform_driver ufs_qcom_pltform = { + .probe = ufs_qcom_probe, + .remove = ufs_qcom_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "ufshcd-qcom", + .pm = &ufs_qcom_pm_ops, + .of_match_table = of_match_ptr(ufs_qcom_of_match), + }, +}; +module_platform_driver(ufs_qcom_pltform); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index db2c0a00e846..36249b35f858 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -35,8 +35,8 @@ #define UFS_QCOM_LIMIT_NUM_LANES_RX 2 #define UFS_QCOM_LIMIT_NUM_LANES_TX 2 -#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G2 -#define UFS_QCOM_LIMIT_HSGEAR_TX 
UFS_HS_G2 +#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G3 +#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G3 #define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4 #define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4 #define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE @@ -58,6 +58,21 @@ enum { REG_UFS_CFG2 = 0xE0, REG_UFS_HW_VERSION = 0xE4, + UFS_TEST_BUS = 0xE8, + UFS_TEST_BUS_CTRL_0 = 0xEC, + UFS_TEST_BUS_CTRL_1 = 0xF0, + UFS_TEST_BUS_CTRL_2 = 0xF4, + UFS_UNIPRO_CFG = 0xF8, + + /* + * QCOM UFS host controller vendor specific registers + * added in HW Version 3.0.0 + */ + UFS_AH8_CFG = 0xFC, +}; + +/* QCOM UFS host controller vendor specific debug registers */ +enum { UFS_DBG_RD_REG_UAWM = 0x100, UFS_DBG_RD_REG_UARM = 0x200, UFS_DBG_RD_REG_TXUC = 0x300, @@ -73,6 +88,14 @@ enum { UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, }; +#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x) +#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x) + +/* bit definitions for REG_UFS_CFG1 register */ +#define QUNIPRO_SEL UFS_BIT(0) +#define TEST_BUS_EN BIT(18) +#define TEST_BUS_SEL GENMASK(22, 19) + /* bit definitions for REG_UFS_CFG2 register */ #define UAWM_HW_CGC_EN (1 << 0) #define UARM_HW_CGC_EN (1 << 1) @@ -83,6 +106,9 @@ enum { #define TMRLUT_HW_CGC_EN (1 << 6) #define OCSC_HW_CGC_EN (1 << 7) +/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */ +#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */ + #define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\ TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\ DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\ @@ -106,6 +132,21 @@ enum ufs_qcom_phy_init_type { UFS_PHY_INIT_CFG_RESTORE, }; +/* QCOM UFS debug print bit mask */ +#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0) +#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1) +#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2) + +#define UFS_QCOM_DBG_PRINT_ALL \ + (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \ + UFS_QCOM_DBG_PRINT_TEST_BUS_EN) + +/* QUniPro Vendor specific attributes */ +#define DME_VS_CORE_CLK_CTRL 0xD002 +/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */ +#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8) +#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF + static inline void ufs_qcom_get_controller_revision(struct ufs_hba *hba, u8 *major, u16 *minor, u16 *step) @@ -157,8 +198,13 @@ struct ufs_hw_version { u16 minor; u8 major; }; -struct ufs_qcom_host { +struct ufs_qcom_testbus { + u8 select_major; + u8 select_minor; +}; + +struct ufs_qcom_host { /* * Set this capability if host controller supports the QUniPro mode * and if driver wants the Host controller to operate in QUniPro mode. @@ -166,6 +212,12 @@ struct ufs_qcom_host { * controller supports the QUniPro mode. */ #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0) + + /* + * Set this capability if host controller can retain the secure + * configuration even after UFS controller core power collapse. 
+ */ + #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE UFS_BIT(1) u32 caps; struct phy *generic_phy; @@ -178,13 +230,23 @@ struct ufs_qcom_host { struct clk *tx_l1_sync_clk; bool is_lane_clks_enabled; + void __iomem *dev_ref_clk_ctrl_mmio; + bool is_dev_ref_clk_enabled; struct ufs_hw_version hw_ver; + + u32 dev_ref_clk_en_mask; + + /* Bitmask for enabling debug prints */ + u32 dbg_print_en; + struct ufs_qcom_testbus testbus; }; #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) +int ufs_qcom_testbus_config(struct ufs_qcom_host *host); + static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) { if (host->caps & UFS_QCOM_CAP_QUNIPRO) diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 7db9564f507d..9714f2a8b329 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -38,20 +38,7 @@ #include <linux/of.h> #include "ufshcd.h" - -static const struct of_device_id ufs_of_match[]; -static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev) -{ - if (dev->of_node) { - const struct of_device_id *match; - - match = of_match_node(ufs_of_match, dev->of_node); - if (match) - return (struct ufs_hba_variant_ops *)match->data; - } - - return NULL; -} +#include "ufshcd-pltfrm.h" static int ufshcd_parse_clock_info(struct ufs_hba *hba) { @@ -245,10 +232,11 @@ out: * Returns 0 if successful * Returns non-zero otherwise */ -static int ufshcd_pltfrm_suspend(struct device *dev) +int ufshcd_pltfrm_suspend(struct device *dev) { return ufshcd_system_suspend(dev_get_drvdata(dev)); } +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend); /** * ufshcd_pltfrm_resume - resume power management function @@ -257,43 +245,47 @@ static int ufshcd_pltfrm_suspend(struct device *dev) * Returns 0 if successful * Returns non-zero otherwise */ -static int ufshcd_pltfrm_resume(struct device *dev) +int ufshcd_pltfrm_resume(struct device *dev) { return ufshcd_system_resume(dev_get_drvdata(dev)); } +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume); -static int ufshcd_pltfrm_runtime_suspend(struct device *dev) +int ufshcd_pltfrm_runtime_suspend(struct device *dev) { return ufshcd_runtime_suspend(dev_get_drvdata(dev)); } -static int ufshcd_pltfrm_runtime_resume(struct device *dev) +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend); + +int ufshcd_pltfrm_runtime_resume(struct device *dev) { return ufshcd_runtime_resume(dev_get_drvdata(dev)); } -static int ufshcd_pltfrm_runtime_idle(struct device *dev) +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume); + +int ufshcd_pltfrm_runtime_idle(struct device *dev) { return ufshcd_runtime_idle(dev_get_drvdata(dev)); } -#else /* !CONFIG_PM */ -#define ufshcd_pltfrm_suspend NULL -#define ufshcd_pltfrm_resume NULL -#define ufshcd_pltfrm_runtime_suspend NULL -#define ufshcd_pltfrm_runtime_resume NULL -#define ufshcd_pltfrm_runtime_idle NULL +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle); + #endif /* CONFIG_PM */ -static void ufshcd_pltfrm_shutdown(struct platform_device *pdev) +void ufshcd_pltfrm_shutdown(struct platform_device *pdev) { ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); } +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown); /** - * ufshcd_pltfrm_probe - probe routine of the driver + * ufshcd_pltfrm_init - probe routine of the driver * @pdev: pointer to Platform device handle + * @vops: pointer to variant ops * * Returns 0 on success, non-zero value on failure */ -static int 
ufshcd_pltfrm_probe(struct platform_device *pdev) +int ufshcd_pltfrm_init(struct platform_device *pdev, + struct ufs_hba_variant_ops *vops) { struct ufs_hba *hba; void __iomem *mmio_base; @@ -321,19 +313,19 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) goto out; } - hba->vops = get_variant_ops(&pdev->dev); + hba->vops = vops; err = ufshcd_parse_clock_info(hba); if (err) { dev_err(&pdev->dev, "%s: clock parse failed %d\n", __func__, err); - goto out; + goto dealloc_host; } err = ufshcd_parse_regulator_info(hba); if (err) { dev_err(&pdev->dev, "%s: regulator init failed %d\n", __func__, err); - goto out; + goto dealloc_host; } pm_runtime_set_active(&pdev->dev); @@ -352,50 +344,12 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev) out_disable_rpm: pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); +dealloc_host: + ufshcd_dealloc_host(hba); out: return err; } - -/** - * ufshcd_pltfrm_remove - remove platform driver routine - * @pdev: pointer to platform device handle - * - * Returns 0 on success, non-zero value on failure - */ -static int ufshcd_pltfrm_remove(struct platform_device *pdev) -{ - struct ufs_hba *hba = platform_get_drvdata(pdev); - - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); - return 0; -} - -static const struct of_device_id ufs_of_match[] = { - { .compatible = "jedec,ufs-1.1"}, - {}, -}; - -static const struct dev_pm_ops ufshcd_dev_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, -}; - -static struct platform_driver ufshcd_pltfrm_driver = { - .probe = ufshcd_pltfrm_probe, - .remove = ufshcd_pltfrm_remove, - .shutdown = ufshcd_pltfrm_shutdown, - .driver = { - .name = "ufshcd", - .pm = &ufshcd_dev_pm_ops, - .of_match_table = ufs_of_match, - }, -}; - -module_platform_driver(ufshcd_pltfrm_driver); +EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init); MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>"); MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h new file mode 100644 index 000000000000..df64c4180340 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pltfrm.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef UFSHCD_PLTFRM_H_ +#define UFSHCD_PLTFRM_H_ + +#include "ufshcd.h" + +int ufshcd_pltfrm_init(struct platform_device *pdev, + struct ufs_hba_variant_ops *vops); +void ufshcd_pltfrm_shutdown(struct platform_device *pdev); + +#ifdef CONFIG_PM + +int ufshcd_pltfrm_suspend(struct device *dev); +int ufshcd_pltfrm_resume(struct device *dev); +int ufshcd_pltfrm_runtime_suspend(struct device *dev); +int ufshcd_pltfrm_runtime_resume(struct device *dev); +int ufshcd_pltfrm_runtime_idle(struct device *dev); + +#else /* !CONFIG_PM */ + +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#define ufshcd_pltfrm_runtime_suspend NULL +#define ufshcd_pltfrm_runtime_resume NULL +#define ufshcd_pltfrm_runtime_idle NULL + +#endif /* CONFIG_PM */ + +#endif /* UFSHCD_PLTFRM_H_ */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9065eb451677..85cd2564c157 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -271,10 +271,8 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) */ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) { - if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) { - if (hba->vops && hba->vops->get_ufs_hci_version) - return hba->vops->get_ufs_hci_version(hba); - } + if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION) + return ufshcd_vops_get_ufs_hci_version(hba); return ufshcd_readl(hba, REG_UFS_VERSION); } @@ -627,6 +625,7 @@ start: out: return rc; } +EXPORT_SYMBOL_GPL(ufshcd_hold); static void ufshcd_gate_work(struct work_struct *work) { @@ -714,6 +713,7 @@ void ufshcd_release(struct ufs_hba *hba) __ufshcd_release(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); } +EXPORT_SYMBOL_GPL(ufshcd_release); static ssize_t ufshcd_clkgate_delay_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2473,9 +2473,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, dev_err(hba->dev, "%s: power mode change failed %d\n", __func__, ret); } else { - if (hba->vops && hba->vops->pwr_change_notify) - hba->vops->pwr_change_notify(hba, - POST_CHANGE, NULL, pwr_mode); + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, + pwr_mode); memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); @@ -2495,10 +2494,10 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr final_params = { 0 }; int ret; - if (hba->vops && hba->vops->pwr_change_notify) - hba->vops->pwr_change_notify(hba, - PRE_CHANGE, desired_pwr_mode, &final_params); - else + ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, + desired_pwr_mode, &final_params); + + if (ret) memcpy(&final_params, desired_pwr_mode, sizeof(final_params)); ret = ufshcd_change_power_mode(hba, &final_params); @@ -2647,8 +2646,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) /* UniPro link is disabled at this point */ ufshcd_set_link_off(hba); - if (hba->vops && hba->vops->hce_enable_notify) - hba->vops->hce_enable_notify(hba, PRE_CHANGE); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); /* start controller initialization sequence */ ufshcd_hba_start(hba); @@ -2681,8 +2679,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) /* enable UIC related interrupts */ ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); - if (hba->vops && hba->vops->hce_enable_notify) - hba->vops->hce_enable_notify(hba, POST_CHANGE); + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); return 0; } @@ -2735,8 +2732,7 @@ static int ufshcd_link_startup(struct ufs_hba *hba) int retries = DME_LINKSTARTUP_RETRIES; do { - if 
(hba->vops && hba->vops->link_startup_notify) - hba->vops->link_startup_notify(hba, PRE_CHANGE); + ufshcd_vops_link_startup_notify(hba, PRE_CHANGE); ret = ufshcd_dme_link_startup(hba); @@ -2767,11 +2763,9 @@ static int ufshcd_link_startup(struct ufs_hba *hba) } /* Include any host controller configuration via UIC commands */ - if (hba->vops && hba->vops->link_startup_notify) { - ret = hba->vops->link_startup_notify(hba, POST_CHANGE); - if (ret) - goto out; - } + ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE); + if (ret) + goto out; ret = ufshcd_make_hba_operational(hba); out: @@ -4577,8 +4571,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, } } - if (hba->vops && hba->vops->setup_clocks) - ret = hba->vops->setup_clocks(hba, on); + ret = ufshcd_vops_setup_clocks(hba, on); out: if (ret) { list_for_each_entry(clki, head, list) { @@ -4644,27 +4637,22 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba) if (!hba->vops) goto out; - if (hba->vops->init) { - err = hba->vops->init(hba); - if (err) - goto out; - } + err = ufshcd_vops_init(hba); + if (err) + goto out; - if (hba->vops->setup_regulators) { - err = hba->vops->setup_regulators(hba, true); - if (err) - goto out_exit; - } + err = ufshcd_vops_setup_regulators(hba, true); + if (err) + goto out_exit; goto out; out_exit: - if (hba->vops->exit) - hba->vops->exit(hba); + ufshcd_vops_exit(hba); out: if (err) dev_err(hba->dev, "%s: variant %s init failed err %d\n", - __func__, hba->vops ? hba->vops->name : "", err); + __func__, ufshcd_get_var_name(hba), err); return err; } @@ -4673,14 +4661,11 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba) if (!hba->vops) return; - if (hba->vops->setup_clocks) - hba->vops->setup_clocks(hba, false); + ufshcd_vops_setup_clocks(hba, false); - if (hba->vops->setup_regulators) - hba->vops->setup_regulators(hba, false); + ufshcd_vops_setup_regulators(hba, false); - if (hba->vops->exit) - hba->vops->exit(hba); + ufshcd_vops_exit(hba); } static int ufshcd_hba_init(struct ufs_hba *hba) @@ -5057,17 +5042,13 @@ disable_clks: * vendor specific host controller register space call them before the * host clocks are ON. */ - if (hba->vops && hba->vops->suspend) { - ret = hba->vops->suspend(hba, pm_op); - if (ret) - goto set_link_active; - } + ret = ufshcd_vops_suspend(hba, pm_op); + if (ret) + goto set_link_active; - if (hba->vops && hba->vops->setup_clocks) { - ret = hba->vops->setup_clocks(hba, false); - if (ret) - goto vops_resume; - } + ret = ufshcd_vops_setup_clocks(hba, false); + if (ret) + goto vops_resume; if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); @@ -5078,7 +5059,7 @@ disable_clks: hba->clk_gating.state = CLKS_OFF; /* * Disable the host irq as host controller as there won't be any - * host controller trasanction expected till resume. + * host controller transaction expected till resume. */ ufshcd_disable_irq(hba); /* Put the host controller in low power mode if possible */ @@ -5086,8 +5067,7 @@ disable_clks: goto out; vops_resume: - if (hba->vops && hba->vops->resume) - hba->vops->resume(hba, pm_op); + ufshcd_vops_resume(hba, pm_op); set_link_active: ufshcd_vreg_set_hpm(hba); if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) @@ -5143,11 +5123,9 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) * vendor specific host controller register space call them when the * host clocks are ON. 
*/ - if (hba->vops && hba->vops->resume) { - ret = hba->vops->resume(hba, pm_op); - if (ret) - goto disable_vreg; - } + ret = ufshcd_vops_resume(hba, pm_op); + if (ret) + goto disable_vreg; if (ufshcd_is_link_hibern8(hba)) { ret = ufshcd_uic_hibern8_exit(hba); @@ -5188,8 +5166,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) set_old_link_state: ufshcd_link_state_transition(hba, old_link_state, 0); vendor_suspend: - if (hba->vops && hba->vops->suspend) - hba->vops->suspend(hba, pm_op); + ufshcd_vops_suspend(hba, pm_op); disable_vreg: ufshcd_vreg_set_lpm(hba); disable_irq_and_vops_clks: @@ -5372,6 +5349,16 @@ void ufshcd_remove(struct ufs_hba *hba) EXPORT_SYMBOL_GPL(ufshcd_remove); /** + * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA) + * @hba: pointer to Host Bus Adapter (HBA) + */ +void ufshcd_dealloc_host(struct ufs_hba *hba) +{ + scsi_host_put(hba->host); +} +EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); + +/** * ufshcd_set_dma_mask - Set dma mask based on the controller * addressing capability * @hba: per adapter instance @@ -5432,6 +5419,10 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) if (!head || list_empty(head)) goto out; + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE); + if (ret) + return ret; + list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { if (scale_up && clki->max_freq) { @@ -5462,8 +5453,9 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, clki->name, clk_get_rate(clki->clk)); } - if (hba->vops->clk_scale_notify) - hba->vops->clk_scale_notify(hba); + + ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); + out: return ret; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c40a0e78a6c4..2570d9477b37 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -223,8 +223,10 @@ struct ufs_clk_info { bool enabled; }; -#define PRE_CHANGE 0 -#define POST_CHANGE 1 +enum ufs_notify_change_status { + PRE_CHANGE, + POST_CHANGE, +}; struct ufs_pa_layer_attr { u32 gear_rx; @@ -259,22 +261,28 @@ struct ufs_pwr_mode_info { * to be set. 
* @suspend: called during host controller PM callback * @resume: called during host controller PM callback + * @dbg_register_dump: used to dump controller debug information */ struct ufs_hba_variant_ops { const char *name; int (*init)(struct ufs_hba *); void (*exit)(struct ufs_hba *); u32 (*get_ufs_hci_version)(struct ufs_hba *); - void (*clk_scale_notify)(struct ufs_hba *); - int (*setup_clocks)(struct ufs_hba *, bool); + int (*clk_scale_notify)(struct ufs_hba *, bool, + enum ufs_notify_change_status); + int (*setup_clocks)(struct ufs_hba *, bool); int (*setup_regulators)(struct ufs_hba *, bool); - int (*hce_enable_notify)(struct ufs_hba *, bool); - int (*link_startup_notify)(struct ufs_hba *, bool); + int (*hce_enable_notify)(struct ufs_hba *, + enum ufs_notify_change_status); + int (*link_startup_notify)(struct ufs_hba *, + enum ufs_notify_change_status); int (*pwr_change_notify)(struct ufs_hba *, - bool, struct ufs_pa_layer_attr *, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *, struct ufs_pa_layer_attr *); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); + void (*dbg_register_dump)(struct ufs_hba *hba); }; /* clock gating state */ @@ -576,6 +584,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) } int ufshcd_alloc_host(struct device *, struct ufs_hba **); +void ufshcd_dealloc_host(struct ufs_hba *); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); void ufshcd_remove(struct ufs_hba *); @@ -594,6 +603,27 @@ static inline void check_upiu_size(void) GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); } +/** + * ufshcd_set_variant - set variant specific data to the hba + * @hba - per adapter instance + * @variant - pointer to variant specific data + */ +static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant) +{ + BUG_ON(!hba); + hba->priv = variant; +} + +/** + * ufshcd_get_variant - get variant specific data from the hba + * @hba - per adapter instance + */ +static inline void *ufshcd_get_variant(struct ufs_hba *hba) +{ + BUG_ON(!hba); + return hba->priv; +} + extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); extern int ufshcd_runtime_idle(struct ufs_hba *hba); @@ -653,4 +683,109 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); + +/* Wrapper functions for safely calling variant operations */ +static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) +{ + if (hba->vops) + return hba->vops->name; + return ""; +} + +static inline int ufshcd_vops_init(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->init) + return hba->vops->init(hba); + + return 0; +} + +static inline void ufshcd_vops_exit(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->exit) + return hba->vops->exit(hba); +} + +static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->get_ufs_hci_version) + return hba->vops->get_ufs_hci_version(hba); + + return ufshcd_readl(hba, REG_UFS_VERSION); +} + +static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, + bool up, enum ufs_notify_change_status status) +{ + if (hba->vops && hba->vops->clk_scale_notify) + return hba->vops->clk_scale_notify(hba, up, status); + return 0; +} + +static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on) +{ + if (hba->vops && hba->vops->setup_clocks) + return 
hba->vops->setup_clocks(hba, on); + return 0; +} + +static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status) +{ + if (hba->vops && hba->vops->setup_regulators) + return hba->vops->setup_regulators(hba, status); + + return 0; +} + +static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->vops && hba->vops->hce_enable_notify) + return hba->vops->hce_enable_notify(hba, status); + + return 0; +} +static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba, + bool status) +{ + if (hba->vops && hba->vops->link_startup_notify) + return hba->vops->link_startup_notify(hba, status); + + return 0; +} + +static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, + bool status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + if (hba->vops && hba->vops->pwr_change_notify) + return hba->vops->pwr_change_notify(hba, status, + dev_max_params, dev_req_params); + + return -ENOTSUPP; +} + +static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op) +{ + if (hba->vops && hba->vops->suspend) + return hba->vops->suspend(hba, op); + + return 0; +} + +static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op) +{ + if (hba->vops && hba->vops->resume) + return hba->vops->resume(hba, op); + + return 0; +} + +static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->dbg_register_dump) + hba->vops->dbg_register_dump(hba); +} + #endif /* End of Header */ |
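
One pattern worth calling out in the ufshcd.c/ufshcd.h hunks above: every open-coded hba->vops NULL check is replaced by a static inline ufshcd_vops_*() wrapper that falls back to a harmless default when the variant does not implement the hook. Condensed from the hunks above (a sketch of the before/after shape, not a complete call site):

/* Before this merge: each ufshcd.c call site checked the vops table itself. */
if (hba->vops && hba->vops->setup_clocks)
	ret = hba->vops->setup_clocks(hba, on);

/* After: the call site collapses to a single wrapper call ... */
ret = ufshcd_vops_setup_clocks(hba, on);

/* ... and the NULL handling lives once, in ufshcd.h. */
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on);

	return 0;
}

ufshcd_vops_pwr_change_notify() is the one wrapper that does not default to success: it returns -ENOTSUPP, which lets ufshcd_config_pwr_mode() fall back to the caller-supplied power mode parameters when no variant hook is present.
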