Diffstat (limited to 'drivers/ufs/host')
 drivers/ufs/host/ufs-qcom.c | 172
 drivers/ufs/host/ufs-qcom.h |  70
 2 files changed, 128 insertions(+), 114 deletions(-)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 4350c44a6fc7..5e7ba3b6a59d 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -101,7 +101,7 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
@@ -110,7 +110,7 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
@@ -128,7 +128,8 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
- goto out;
+
+ return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
@@ -136,7 +137,7 @@ disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
return err;
}
@@ -151,25 +152,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
-out:
- return err;
+
+ return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -232,7 +233,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
+ return 0;
}
reenable_intr = hba->is_irq_enabled;
@@ -243,7 +244,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/*
@@ -265,16 +266,35 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
hba->is_irq_enabled = true;
}
-out:
- return ret;
+ return 0;
+}
+
+static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x1) {
+ /*
+ * HS-G3 operations may not reliably work on legacy QCOM
+ * UFS host controller hardware even though capability
+ * exchange during link startup phase may end up
+ * negotiating maximum supported gear as G3.
+ * Hence downgrade the maximum supported gear to HS-G2.
+ */
+ return UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x4) {
+ return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
+ }
+
+ /* Default is HS-G3 */
+ return UFS_HS_G3;
}
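
For reference, a minimal sketch (not part of the patch) of how the new UFS_QCOM_MAX_GEAR() helper, defined in ufs-qcom.h below, decodes the maximum HS gear; the sample register value is made up:

	/* Illustrative only -- the sample PARAM0 readback is hypothetical. */
	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define MAX_HS_GEAR_MASK	GENMASK(6, 4)	/* as added in ufs-qcom.h below */
	#define UFS_QCOM_MAX_GEAR(x)	FIELD_GET(MAX_HS_GEAR_MASK, (x))

	/*
	 * With a (made-up) REG_UFS_PARAM0 readback of 0x40:
	 *   (0x40 & GENMASK(6, 4)) >> 4 == 4
	 * which the driver uses directly as the gear number, i.e. HS-G4.
	 */
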
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
+ int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
@@ -282,17 +302,16 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
+ phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
@@ -307,7 +326,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
out_disable_phy:
phy_exit(phy);
-out:
+
return ret;
}
@@ -365,7 +384,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
- int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
@@ -400,11 +418,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
+ return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
+ return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
@@ -427,7 +445,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
if (ufs_qcom_cap_qunipro(host))
- goto out;
+ return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -442,7 +460,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
@@ -451,13 +469,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
- goto out_error;
+ return -EINVAL;
}
break;
case SLOWAUTO_MODE:
@@ -467,14 +485,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
+ return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
@@ -498,12 +516,8 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
*/
mb();
}
- goto out;
-out_error:
- ret = -EINVAL;
-out:
- return ret;
+ return 0;
}
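
As a quick worked example of the timer math above (the 150 MHz core clock rate is assumed, not taken from the patch):

	/* Illustrative arithmetic only. */
	core_clk_period_in_ns = NSEC_PER_SEC / 150000000;	/* = 6 */
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;		/* 6 << 10 = 0x1800 */
	/* 0x1800 falls entirely within MASK_CLK_NS_REG (GENMASK(23, 10)). */
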
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
@@ -518,8 +532,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
@@ -545,7 +558,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -680,24 +692,18 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
struct ufs_dev_params ufs_qcom_cap;
int ret = 0;
+ if (!dev_req_params) {
+ pr_err("%s: incoming dev_req_params is NULL\n", __func__);
+ return -EINVAL;
+ }
+
switch (status) {
case PRE_CHANGE:
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
+ /* This driver only supports symmetric gear setting, i.e. hs_tx_gear == hs_rx_gear */
+ ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
@@ -705,9 +711,12 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
- goto out;
+ return ret;
}
+ /* Use the agreed gear */
+ host->hs_gear = dev_req_params->gear_tx;
+
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
@@ -746,7 +755,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ret = -EINVAL;
break;
}
-out:
+
return ret;
}
@@ -758,14 +767,11 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
- goto out;
+ return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
@@ -824,6 +830,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
+
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
@@ -891,8 +900,6 @@ ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_assert_reset(host->hba);
/* provide 1ms delay to let the reset pulse propagate. */
usleep_range(1000, 1100);
@@ -904,8 +911,6 @@ ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_deassert_reset(host->hba);
/*
@@ -942,9 +947,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
- err = -ENOMEM;
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
+ return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
@@ -965,10 +969,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
+ if (err)
dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -1033,17 +1035,22 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
- if (err) {
+ if (err)
+ /* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- err = 0;
- }
- goto out;
+ /*
+ * Power up the PHY using the minimum supported gear (UFS_HS_G2).
+ * Switching to max gear will be performed during reinit if supported.
+ */
+ host->hs_gear = UFS_HS_G2;
+
+ return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
-out:
+
return err;
}
@@ -1069,7 +1076,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
- goto out;
+ return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
@@ -1077,11 +1084,9 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
+ return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-out:
- return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
@@ -1164,7 +1169,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err || !dev_req_params) {
ufshcd_uic_hibern8_exit(hba);
- goto out;
+ return err;
}
ufs_qcom_cfg_timers(hba,
@@ -1175,8 +1180,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
ufshcd_uic_hibern8_exit(hba);
}
-out:
- return err;
+ return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
@@ -1385,6 +1389,13 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
+static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ phy_power_off(host->generic_phy);
+}
+
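
Taken together with the UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH quirk set above, the intended sequence appears to be: probe brings the PHY up at HS-G2, ufs_qcom_pwr_change_notify() records the negotiated gear in host->hs_gear, this callback powers the PHY off before the core re-initializes the host, and ufs_qcom_power_up_sequence() then reprograms the PHY at the agreed gear via:

	phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
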
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1408,6 +1419,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
+ .reinit_notify = ufs_qcom_reinit_notify,
};
/**
@@ -1424,9 +1436,9 @@ static int ufs_qcom_probe(struct platform_device *pdev)
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
- return err;
+ return 0;
}
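
dev_err_probe() logs the message (or, for -EPROBE_DEFER, records a deferral reason instead) and returns the error code passed to it, which is why the probe path above can return its result directly. A minimal sketch of the same pattern, assuming a hypothetical "core" clock:

	/* Sketch of the dev_err_probe() pattern only; the "core" clock name is made up. */
	#include <linux/clk.h>
	#include <linux/dev_printk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, "core");

		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get core clock\n");
		return 0;
	}
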
/**
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index e567e4636357..f744a9e62002 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -17,12 +17,9 @@
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
+#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
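
As a sanity check (not part of the patch), the new GENMASK() forms are bit-for-bit equivalent to the shift-based masks they replace, which could be verified at compile time:

	/* Not in the patch; compile-time check of the mask conversion. */
	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static_assert(GENMASK(31, 28) == (0x000FUL << 28));
	static_assert(GENMASK(27, 16) == (0x0FFFUL << 16));
	static_assert(GENMASK(15, 0)  == 0xFFFFUL);
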
/* vendor specific pre-defined parameters */
#define SLOW 1
@@ -36,7 +33,8 @@ enum {
REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
+ /* On older UFS revisions, this register is called "RETRY_TIMER_REG" */
+ REG_UFS_PARAM0 = 0xD0,
REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
REG_UFS_CFG1 = 0xDC,
REG_UFS_CFG2 = 0xE0,
@@ -76,24 +74,32 @@ enum {
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
+#define QUNIPRO_SEL BIT(0)
+#define UFS_PHY_SOFT_RESET BIT(1)
+#define UTP_DBG_RAMS_EN BIT(17)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
+#define UFS_PHY_RESET_ENABLE 1
+#define UFS_PHY_RESET_DISABLE 0
+
/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
+#define UAWM_HW_CGC_EN BIT(0)
+#define UARM_HW_CGC_EN BIT(1)
+#define TXUC_HW_CGC_EN BIT(2)
+#define RXUC_HW_CGC_EN BIT(3)
+#define DFC_HW_CGC_EN BIT(4)
+#define TRLUT_HW_CGC_EN BIT(5)
+#define TMRLUT_HW_CGC_EN BIT(6)
+#define OCSC_HW_CGC_EN BIT(7)
+
+/* bit definitions for REG_UFS_PARAM0 */
+#define MAX_HS_GEAR_MASK GENMASK(6, 4)
+#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
+#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
@@ -101,17 +107,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
+#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
+#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
+#define MASK_CLK_NS_REG GENMASK(23, 10)
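
Likewise, these GENMASK() forms match the removed enum values and could be checked the same way:

	/* Not in the patch; same includes as the check above. */
	static_assert(GENMASK(9, 0)   == 0x3FFUL);
	static_assert(GENMASK(23, 10) == 0xFFFC00UL);
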
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
@@ -126,15 +126,15 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
{
u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+ *major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver);
+ *minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver);
+ *step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver);
};
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
+ REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -145,8 +145,8 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
+ REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -210,6 +210,8 @@ struct ufs_qcom_host {
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
+
+ u32 hs_gear;
};
static inline u32