Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c       12
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c     54
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h    1
-rw-r--r--  drivers/ufs/core/ufshcd.c       123
-rw-r--r--  drivers/ufs/host/ufs-exynos.c    85
-rw-r--r--  drivers/ufs/host/ufs-exynos.h     6
-rw-r--r--  drivers/ufs/host/ufs-qcom.c      43
-rw-r--r--  drivers/ufs/host/ufs-qcom.h      18
8 files changed, 288 insertions, 54 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 90b5ab60f5ae..634cf163f4cb 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -466,6 +466,56 @@ static ssize_t critical_health_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int value;
+
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ /* the only supported usecase is to reset the dev_lvl_exception_count */
+ if (value)
+ return -EINVAL;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+
+ return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u64 exception_id;
+ int err;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ return err;
+
+ hba->dev_lvl_exception_id = exception_id;
+ return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -479,6 +529,8 @@ static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -494,6 +546,8 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
&dev_attr_critical_health.attr,
+ &dev_attr_device_lvl_exception_count.attr,
+ &dev_attr_device_lvl_exception_id.attr,
NULL
};
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 10b4a19a70f1..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 0534390c2a35..5cb6132b8147 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -3176,16 +3177,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
int err;
retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3199,7 +3194,6 @@ retry:
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
return err;
}
@@ -3215,11 +3209,9 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
+ if (pending)
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- }
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3237,8 +3229,6 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3272,13 +3262,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
int err;
- hba->dev_cmd.complete = &wait;
-
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -5585,12 +5571,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ } else {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
- complete(hba->dev_cmd.complete);
+ complete(&hba->dev_cmd.complete);
}
}
@@ -5692,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -6013,6 +6001,42 @@ out:
__func__, err);
}
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
+}
+
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6083,7 +6107,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6165,15 +6189,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6188,7 +6210,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6240,6 +6262,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
}
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -8139,6 +8166,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8339,6 +8382,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufs_init_rtc(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8428,6 +8473,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8438,6 +8508,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -10490,6 +10563,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ init_completion(&hba->dev_cmd.complete);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index d7539cda97da..3e545af536e5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -92,11 +92,16 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE BIT(2)
-#define UFS_RD_SHARABLE BIT(1)
-#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET 0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE (UFS_EXYNOSAUTO_WR_SHARABLE | \
+ UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE BIT(1)
+#define UFS_GS101_RD_SHARABLE BIT(0)
+#define UFS_GS101_SHARABLE (UFS_GS101_WR_SHARABLE | \
+ UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
/* Multi-host registers */
#define MHCTRL 0xC4
@@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs)
/* IO Coherency setting */
if (ufs->sysreg) {
return regmap_update_bits(ufs->sysreg,
- ufs->shareability_reg_offset,
- UFS_SHARABLE, UFS_SHARABLE);
+ ufs->iocc_offset,
+ ufs->iocc_mask, ufs->iocc_val);
}
return 0;
@@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
}
phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+
+ if (generic_phy->power_count) {
+ phy_power_off(generic_phy);
+ phy_exit(generic_phy);
+ }
+
ret = phy_init(generic_phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
ufs->sysreg = NULL;
else {
if (of_property_read_u32_index(np, "samsung,sysreg", 1,
- &ufs->shareability_reg_offset)) {
+ &ufs->iocc_offset)) {
dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
- ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
}
}
+ ufs->iocc_mask = ufs->drv_data->iocc_mask;
+ /*
+ * no 'dma-coherent' property means the descriptors are
+ * non-cacheable so iocc shareability should be disabled.
+ */
+ if (of_dma_is_coherent(dev->of_node))
+ ufs->iocc_val = ufs->iocc_mask;
+ else
+ ufs->iocc_val = 0;
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -1497,6 +1523,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1667,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1675,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1952,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -1990,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
ufshcd_pltfrm_remove(pdev);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2035,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .iocc_mask = UFS_EXYNOSAUTO_SHARABLE,
.drv_init = exynosauto_ufs_drv_init,
.post_hce_enable = exynosauto_ufs_post_hce_enable,
.pre_link = exynosauto_ufs_pre_link,
@@ -2136,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_UFSPR_SECURE |
EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+ .iocc_mask = UFS_GS101_SHARABLE,
.drv_init = gs101_ufs_drv_init,
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index aac517276189..abe7e472759e 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -181,6 +181,7 @@ struct exynos_ufs_drv_data {
struct exynos_ufs_uic_attr *uic_attr;
unsigned int quirks;
unsigned int opts;
+ u32 iocc_mask;
/* SoC's specific operations */
int (*drv_init)(struct exynos_ufs *ufs);
int (*pre_link)(struct exynos_ufs *ufs);
@@ -191,6 +192,7 @@ struct exynos_ufs_drv_data {
const struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
+ int (*suspend)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
@@ -230,7 +232,9 @@ struct exynos_ufs {
ktime_t entry_hibern8_t;
const struct exynos_ufs_drv_data *drv_data;
struct regmap *sysreg;
- u32 shareability_reg_offset;
+ u32 iocc_offset;
+ u32 iocc_mask;
+ u32 iocc_val;
u32 opts;
#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 1b37449fbffc..c0761ccc1381 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -33,6 +33,10 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB 0x04
+#define NO_DEEMPHASIS 0x0
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -795,6 +799,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
}
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+ u32 equalizer_val;
+ int ret, i;
+
+ /* Determine the equalizer value based on the gear */
+ equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+ for (i = 0; i < tx_lanes; i++) {
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+ equalizer_val);
+ if (ret)
+ dev_err(hba->dev, "%s: failed equalizer lane %d\n",
+ __func__, i);
+ }
+}
+
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
const struct ufs_pa_layer_attr *dev_max_params,
@@ -846,6 +867,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+ ufs_qcom_set_tx_hs_equalizer(hba,
+ dev_req_params->gear_tx, dev_req_params->lane_tx);
+
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, false)) {
@@ -893,6 +919,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
(pa_vs_config_reg1 | (1 << 12)));
}
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+ PA_TX_HSG1_SYNC_LENGTH_VAL);
+ if (err)
+ dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
@@ -900,6 +936,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+ ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
return err;
}
@@ -914,6 +953,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+ .model = UFS_ANY_MODEL,
+ .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+ UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
{}
};
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index d0e6ec9128e7..05d4cb569c50 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -122,8 +122,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH 0x1552
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
+#define TX_HS_EQUALIZER 0x0037
+
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
@@ -141,6 +144,21 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL 0x4A
+
+/*
+ * Some ufs device vendors need a different TSync length.
+ * Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH BIT(16)
+
+/*
+ * Some ufs device vendors need a different Deemphasis setting.
+ * Enable this quirk to tune TX Deemphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING BIT(17)
+
/* ICE allocator type to share AES engines among TX stream and RX stream */
#define ICE_ALLOCATOR_TYPE 2