Diffstat (limited to 'drivers/ufs')
-rw-r--r--  drivers/ufs/core/ufs-mcq.c        |  12
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c      |  64
-rw-r--r--  drivers/ufs/core/ufs_trace.h      | 135
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.c  |   7
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h    |  22
-rw-r--r--  drivers/ufs/core/ufshcd.c         | 271
-rw-r--r--  drivers/ufs/host/Kconfig          |  12
-rw-r--r--  drivers/ufs/host/Makefile         |   1
-rw-r--r--  drivers/ufs/host/ufs-exynos.c     |  98
-rw-r--r--  drivers/ufs/host/ufs-exynos.h     |   8
-rw-r--r--  drivers/ufs/host/ufs-hisi.c       |   6
-rw-r--r--  drivers/ufs/host/ufs-mediatek.c   |  11
-rw-r--r--  drivers/ufs/host/ufs-qcom.c       | 174
-rw-r--r--  drivers/ufs/host/ufs-qcom.h       |  57
-rw-r--r--  drivers/ufs/host/ufs-renesas.c    | 723
-rw-r--r--  drivers/ufs/host/ufs-rockchip.c   | 354
-rw-r--r--  drivers/ufs/host/ufs-rockchip.h   |  90
-rw-r--r--  drivers/ufs/host/ufs-sprd.c       |   6
-rw-r--r--  drivers/ufs/host/ufshcd-pci.c     |   2
19 files changed, 1524 insertions, 529 deletions
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int err;
- if (!ufshcd_cmd_inflight(lrbp->cmd)) {
- dev_err(hba->dev,
- "%s: skip abort. cmd at tag %d already completed.\n",
- __func__, tag);
- return FAILED;
- }
-
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq) {
+ dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+ __func__, tag);
+ return FAILED;
+ }
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
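
The hunk above drops the early ufshcd_cmd_inflight() check and instead treats a NULL return from ufshcd_mcq_req_to_hwq() as "command already completed". A stubbed, self-contained model of the resulting control flow (the names and return values are illustrative, not the kernel's):

#include <stdio.h>

struct hwq;	/* opaque stand-in for struct ufs_hw_queue */

static struct hwq *req_to_hwq(int tag)
{
	(void)tag;
	return NULL;	/* stub: NULL once the command has completed */
}

static int mcq_abort(int tag)
{
	struct hwq *hwq = req_to_hwq(tag);

	if (!hwq) {
		fprintf(stderr, "skip abort: tag %d already completed\n", tag);
		return 1;	/* stand-in for SCSI's FAILED */
	}
	/* ...otherwise search the SQ and abort or nullify the command... */
	return 0;
}

int main(void)
{
	printf("abort returned %d\n", mcq_abort(7));
	return 0;
}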
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3438269a5440..634cf163f4cb 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -458,6 +458,64 @@ static ssize_t pm_qos_enable_store(struct device *dev,
return count;
}
+static ssize_t critical_health_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->critical_health_count);
+}
+
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int value;
+
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ /* the only supported use case is to reset the dev_lvl_exception_count */
+ if (value)
+ return -EINVAL;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+
+ return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u64 exception_id;
+ int err;
+
+ ufshcd_rpm_get_sync(hba);
+ err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+ ufshcd_rpm_put_sync(hba);
+
+ if (err)
+ return err;
+
+ hba->dev_lvl_exception_id = exception_id;
+ return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -470,6 +528,9 @@ static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
+static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -484,6 +545,9 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_flush_threshold.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
+ &dev_attr_critical_health.attr,
+ &dev_attr_device_lvl_exception_count.attr,
+ &dev_attr_device_lvl_exception_id.attr,
NULL
};
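
All three attributes hang off the host controller device, and the exception-event handler (see the ufshcd.c hunks below) calls sysfs_notify() on critical_health and device_lvl_exception_count, so userspace can block in poll(2) rather than re-read in a loop. A minimal sketch of the standard sysfs poll protocol; the attribute path is hypothetical and depends on the platform device name:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	/* Hypothetical path; substitute the real UFS host device directory. */
	int fd = open("/sys/devices/platform/ufshc/critical_health", O_RDONLY);

	if (fd < 0)
		return 1;
	read(fd, buf, sizeof(buf));	/* initial read arms the notification */

	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	while (poll(&pfd, 1, -1) > 0) {
		ssize_t n;

		lseek(fd, 0, SEEK_SET);	/* sysfs: seek back to 0 and re-read */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("critical_health = %s", buf);
		}
	}
	return 0;
}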
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 84deca2b841d..caa32e23ffa5 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -83,34 +83,34 @@ UFS_CMD_TRACE_TSF_TYPES
TRACE_EVENT(ufshcd_clk_gating,
- TP_PROTO(const char *dev_name, int state),
+ TP_PROTO(struct ufs_hba *hba, int state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->state = state;
),
TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
);
TRACE_EVENT(ufshcd_clk_scaling,
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
+ TP_PROTO(struct ufs_hba *hba, const char *state, const char *clk,
u32 prev_state, u32 curr_state),
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+ TP_ARGS(hba, state, clk, prev_state, curr_state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
__string(clk, clk)
__field(u32, prev_state)
@@ -118,7 +118,7 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
__assign_str(clk);
__entry->prev_state = prev_state;
@@ -126,80 +126,80 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
+ dev_name(__entry->hba->dev), __get_str(state), __get_str(clk),
__entry->prev_state, __entry->curr_state)
);
TRACE_EVENT(ufshcd_auto_bkops_state,
- TP_PROTO(const char *dev_name, const char *state),
+ TP_PROTO(struct ufs_hba *hba, const char *state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
),
TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
+ dev_name(__entry->hba->dev), __get_str(state))
);
DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err),
+ TP_ARGS(hba, profile_info, time_us, err),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(profile_info, profile_info)
__field(s64, time_us)
__field(int, err)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(profile_info);
__entry->time_us = time_us;
__entry->err = err;
),
TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
+ dev_name(__entry->hba->dev), __get_str(profile_info),
__entry->time_us, __entry->err)
);
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+ TP_ARGS(hba, err, usecs, dev_state, link_state),
TP_STRUCT__entry(
__field(s64, usecs)
__field(int, err)
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, dev_state)
__field(int, link_state)
),
@@ -207,14 +207,14 @@ DECLARE_EVENT_CLASS(ufshcd_template,
TP_fast_assign(
__entry->usecs = usecs;
__entry->err = err;
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->dev_state = dev_state;
__entry->link_state = link_state;
),
TP_printk(
"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__entry->usecs,
__print_symbolic(__entry->dev_state, UFS_PWR_MODES),
__print_symbolic(__entry->link_state, UFS_LINK_STATES),
@@ -223,60 +223,62 @@ DECLARE_EVENT_CLASS(ufshcd_template,
);
DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ TP_PROTO(struct scsi_device *sdev, struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ TP_ARGS(sdev, hba, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
opcode, group_id),
TP_STRUCT__entry(
__field(struct scsi_device *, sdev)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -290,6 +292,7 @@ TRACE_EVENT(ufshcd_command,
TP_fast_assign(
__entry->sdev = sdev;
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -312,13 +315,13 @@ TRACE_EVENT(ufshcd_command,
);
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+ TP_ARGS(hba, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
@@ -327,7 +330,7 @@ TRACE_EVENT(ufshcd_uic_command,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
@@ -337,19 +340,19 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, void *hdr,
void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+ TP_ARGS(hba, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
@@ -357,7 +360,7 @@ TRACE_EVENT(ufshcd_upiu,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
@@ -366,7 +369,7 @@ TRACE_EVENT(ufshcd_upiu,
TP_printk(
"%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
@@ -375,22 +378,22 @@ TRACE_EVENT(ufshcd_upiu,
TRACE_EVENT(ufshcd_exception_event,
- TP_PROTO(const char *dev_name, u16 status),
+ TP_PROTO(struct ufs_hba *hba, u16 status),
- TP_ARGS(dev_name, status),
+ TP_ARGS(hba, status),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(u16, status)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->status = status;
),
TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
+ dev_name(__entry->hba->dev), __entry->status
)
);
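
Every event above swaps __string(dev_name, ...) for a stored struct ufs_hba pointer, resolving dev_name() in TP_printk() when the trace is read out. A rough userspace model of the per-entry space tradeoff; the layouts are illustrative, not the real ring-buffer format, and the flip side is that the pointer is only dereferenced at print time, so the hba must still be valid then:

#include <stdio.h>

/* Old style: the device name string was copied into every entry. */
struct entry_str { char dev_name[32]; int state; };

/* New style: only the pointer is recorded; the name is looked up on output. */
struct entry_ptr { void *hba; int state; };

int main(void)
{
	printf("per-event cost: %zu -> %zu bytes\n",
	       sizeof(struct entry_str), sizeof(struct entry_ptr));
	return 0;
}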
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 694ff7578fc1..9e63a9d3cb7e 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -72,11 +72,11 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
ufshcd_program_key(hba, &cfg, slot);
@@ -185,6 +185,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
hba->crypto_profile.dev = hba->dev;
/*
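
In the XTS branch above, the doubled blk-crypto key is split so the tweak half always lands at the fixed midpoint of the controller's key buffer, whatever the key size. A standalone sketch of that layout, assuming UFS_CRYPTO_KEY_MAX_SIZE is 64 bytes:

#include <stdio.h>
#include <string.h>

#define UFS_CRYPTO_KEY_MAX_SIZE 64	/* assumed value, for illustration */

int main(void)
{
	unsigned char bytes[64];	/* XTS: blk-crypto key size is already doubled */
	unsigned char cfg_key[UFS_CRYPTO_KEY_MAX_SIZE] = { 0 };
	size_t size = sizeof(bytes);

	memset(bytes, 0xAB, sizeof(bytes));

	/* Encryption key in the low half, tweak key at the fixed midpoint. */
	memcpy(cfg_key, bytes, size / 2);
	memcpy(cfg_key + UFS_CRYPTO_KEY_MAX_SIZE / 2, bytes + size / 2, size / 2);

	printf("enckey at offset 0, twkey at offset %d\n",
	       UFS_CRYPTO_KEY_MAX_SIZE / 2);
	return 0;
}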
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 786f20ef2238..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
enum query_opcode desc_op);
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -117,11 +118,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
+ return hba->vops->clk_scale_notify(hba, up, target_freq, status);
return 0;
}
@@ -159,9 +161,9 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
}
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
if (hba->vops && hba->vops->pwr_change_notify)
return hba->vops->pwr_change_notify(hba, status,
@@ -270,6 +272,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ if (hba->vops && hba->vops->freq_to_gear_speed)
+ return hba->vops->freq_to_gear_speed(hba, freq);
+
+ return 0;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
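
ufshcd_vops_freq_to_gear_speed() returns 0 when the host driver does not implement the vop, and the core treats 0 as "fall back to legacy gear scaling" (see the ufshcd_scale_gear() hunk below). A hypothetical vendor implementation might look like this kernel-style sketch; it is not standalone code, and the frequency-to-gear table is invented:

/* Hypothetical mapping; real tables are SoC-specific. */
static u32 example_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
	switch (freq) {
	case 403000000:
		return UFS_HS_G5;
	case 300000000:
		return UFS_HS_G4;
	case 201500000:
		return UFS_HS_G3;
	default:
		return 0;	/* unknown freq: core falls back to legacy scaling */
	}
}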
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..5cb6132b8147 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+ UFS_DEVICE_QUIRK_PA_HIBER8TIME |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
@@ -369,7 +370,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
else
header = &hba->lrb[tag].ucd_rsp_ptr->header;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+ trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -380,7 +381,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
@@ -393,12 +394,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
@@ -418,7 +419,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+ trace_ufshcd_uic_command(hba, str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
@@ -473,7 +474,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
transfer_len, intr, lba, opcode, group_id);
}
@@ -1063,7 +1064,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->max_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
@@ -1081,7 +1082,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->min_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
@@ -1122,7 +1123,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
return ret;
}
- trace_ufshcd_clk_scaling(dev_name(dev),
+ trace_ufshcd_clk_scaling(hba,
(scaling_down ? "scaled down" : "scaled up"),
clki->name, hba->clk_scaling.target_freq, freq);
}
@@ -1162,7 +1163,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@@ -1173,7 +1174,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@@ -1186,7 +1187,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
ufshcd_pm_qos_update(hba, scale_up);
out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1313,16 +1314,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
+ * @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
+ if (target_gear) {
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = target_gear;
+ new_pwr_info.gear_rx = target_gear;
+
+ goto config_pwr_mode;
+ }
+
+ /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -1343,6 +1354,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
+config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@@ -1387,13 +1399,13 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
- /* Enable Write Booster if we have scaled up else disable it */
+ /* Enable Write Booster if the current gear requires it, else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
- ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
@@ -1413,15 +1425,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
+ u32 old_gear = hba->pwr_info.gear_rx;
+ u32 new_gear = 0;
int ret = 0;
+ new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
+ ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@@ -1429,13 +1445,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
- ufshcd_scale_gear(hba, true);
+ ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
+ ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@@ -1444,7 +1460,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
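
The sequencing above is the invariant worth noting: the link must never run a gear the clocks cannot sustain, so the gear drops before the clocks on the way down and rises after them on the way up, and old_gear is kept for rollback when the clock change fails. A compact runnable model with the UFS calls stubbed out:

#include <stdbool.h>
#include <stdio.h>

static int scale_gear(unsigned int gear) { printf("gear -> %u\n", gear); return 0; }
static int scale_clks(unsigned long freq) { printf("clks -> %lu\n", freq); return 0; }

static int devfreq_scale(unsigned int old_gear, unsigned int new_gear,
			 unsigned long freq, bool scale_up)
{
	int ret;

	if (!scale_up && (ret = scale_gear(new_gear)))	/* slow the link first */
		return ret;
	if ((ret = scale_clks(freq))) {
		if (!scale_up)
			scale_gear(old_gear);	/* roll the gear back on failure */
		return ret;
	}
	if (scale_up)
		ret = scale_gear(new_gear);	/* clocks are up, raise the gear */
	return ret;
}

int main(void)
{
	return devfreq_scale(3, 4, 403000000UL, true);
}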
@@ -1548,7 +1564,7 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ret)
hba->clk_scaling.target_freq = *freq;
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -1720,6 +1736,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_info *clki;
+ unsigned long freq;
u32 value;
int err = 0;
@@ -1743,14 +1761,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
+ goto out_rel;
}
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ freq = clki->max_freq;
+
+ ufshcd_suspend_clkscaling(hba);
+
+ if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+ goto out_rel;
+
+ err = ufshcd_devfreq_scale(hba, freq, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ else
+ hba->clk_scaling.target_freq = freq;
+
+out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@@ -1783,6 +1812,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
+ if (!hba->clk_scaling.wb_gear)
+ /* Use intermediate gear speed HS_G3 as the default wb_gear */
+ hba->clk_scaling.wb_gear = UFS_HS_G3;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1881,7 +1914,7 @@ start:
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
break;
}
@@ -1893,7 +1926,7 @@ start:
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
@@ -1933,7 +1966,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.is_suspended ||
hba->clk_gating.state != REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1955,7 +1988,7 @@ static void ufshcd_gate_work(struct work_struct *work)
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1980,7 +2013,7 @@ static void ufshcd_gate_work(struct work_struct *work)
guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
}
@@ -2006,7 +2039,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
}
hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -3144,16 +3177,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
int err;
retry:
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
- /*
- * The completion handler called complete() and the caller of
- * this function still owns the @lrbp tag so the code below does
- * not trigger any race conditions.
- */
- hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3167,7 +3194,6 @@ retry:
/* successfully cleared the command, retry if needed */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
err = -EAGAIN;
- hba->dev_cmd.complete = NULL;
return err;
}
@@ -3183,11 +3209,9 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending) {
- hba->dev_cmd.complete = NULL;
+ if (pending)
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- }
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3205,8 +3229,6 @@ retry:
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
- if (pending)
- hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
@@ -3240,13 +3262,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
const u32 tag, int timeout)
{
- DECLARE_COMPLETION_ONSTACK(wait);
int err;
- hba->dev_cmd.complete = &wait;
-
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -4005,7 +4023,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_RESET,
@@ -4019,6 +4037,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -4044,7 +4063,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_ENABLE,
@@ -4058,6 +4077,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4422,7 +4442,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ trace_ufshcd_profile_hibern8(hba, "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
@@ -4447,7 +4467,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ trace_ufshcd_profile_hibern8(hba, "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
@@ -5551,12 +5571,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- } else if (hba->dev_cmd.complete) {
+ } else {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
- complete(hba->dev_cmd.complete);
+ complete(&hba->dev_cmd.complete);
}
}
@@ -5658,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
continue;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+ if (!hwq)
+ continue;
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -5808,7 +5830,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+ trace_ufshcd_auto_bkops_state(hba, "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5859,7 +5881,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -5979,6 +6001,42 @@ out:
__func__, err);
}
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+ struct utp_upiu_query_v4_0 *upiu_resp;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return -EOPNOTSUPP;
+
+ ufshcd_hold(hba);
+ mutex_lock(&hba->dev_cmd.lock);
+
+ ufshcd_init_query(hba, &request, &response,
+ UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+ *exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ return err;
+}
+
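
The exception ID arrives big-endian in the query response, hence the get_unaligned_be64() on the osf3 field. Its effect, modeled in portable C:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the kernel's get_unaligned_be64(): byte-wise, no alignment assumed. */
static uint64_t be64_from_bytes(const unsigned char *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	const unsigned char osf3[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x02 };

	printf("exception_id = %llu\n",
	       (unsigned long long)be64_from_bytes(osf3));	/* 258 */
	return 0;
}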
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6049,7 +6107,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
return ret;
}
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
@@ -6131,15 +6189,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
}
/*
- * The ufs device needs the vcc to be ON to flush.
* With user-space reduction enabled, it's enough to enable flush
* by checking only the available buffer. The threshold
* defined here is > 90% full.
* With user-space preserved enabled, the current-buffer
* should be checked too because the wb buffer size can reduce
* when disk tends to be full. This info is provided by current
- * buffer (dCurrentWriteBoosterBufferSize). There's no point in
- * keeping vcc on when current buffer is empty.
+ * buffer (dCurrentWriteBoosterBufferSize).
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6154,7 +6210,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+ return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6193,7 +6249,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
return;
}
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
+ trace_ufshcd_exception_event(hba, status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
@@ -6201,6 +6257,16 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+ if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+ hba->critical_health_count++;
+ sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+ }
+
+ if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+ atomic_inc(&hba->dev_lvl_exception_count);
+ sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
@@ -7652,7 +7718,7 @@ static void ufshcd_process_probe_result(struct ufs_hba *hba,
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- trace_ufshcd_init(dev_name(hba->dev), ret,
+ trace_ufshcd_init(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), probe_start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
}
@@ -8100,6 +8166,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u32 ext_ufs_feature;
+
+ if (hba->dev_info.wspecversion < 0x410)
+ return;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+ return;
+
+ atomic_set(&hba->dev_lvl_exception_count, 0);
+ ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8293,8 +8375,15 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (dev_info->wspecversion >= 0x410) {
+ hba->critical_health_count = 0;
+ ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+ }
+
ufs_init_rtc(hba, desc_buf);
+ ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -8384,6 +8473,31 @@ out:
return ret;
}
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by one step (100 us).
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+ u32 pa_h8time;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+ return;
+ }
+
+ /* Increment by 1 to increase hibernation time by 100 µs */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+ if (ret)
+ dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
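
Per the comment inside the quirk, PA_HIBERN8TIME counts in 100 us units, so the +1 buys the device an extra 100 us before hibernate entry. The arithmetic, spelled out:

#include <stdio.h>

#define PA_H8TIME_STEP_US 100	/* granularity, per the quirk's comment */

int main(void)
{
	unsigned int pa_h8time = 3;	/* example value read back over DME */
	unsigned int new_val = pa_h8time + 1;

	printf("hibern8 time: %u us -> %u us\n",
	       pa_h8time * PA_H8TIME_STEP_US, new_val * PA_H8TIME_STEP_US);
	return 0;
}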
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
ufshcd_vops_apply_dev_quirks(hba);
@@ -8394,6 +8508,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+ ufshcd_quirk_override_pa_h8time(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -9148,12 +9265,12 @@ out:
} else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_gating(hba,
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -9853,7 +9970,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9873,7 +9990,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9905,7 +10022,7 @@ static int ufshcd_wl_suspend(struct device *dev)
out:
if (!ret)
hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9928,7 +10045,7 @@ static int ufshcd_wl_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
@@ -9966,7 +10083,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
@@ -10039,7 +10156,7 @@ int ufshcd_system_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_system_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10067,7 +10184,7 @@ int ufshcd_system_resume(struct device *dev)
ret = ufshcd_resume(hba);
out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_system_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -10093,7 +10210,7 @@ int ufshcd_runtime_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10120,7 +10237,7 @@ int ufshcd_runtime_resume(struct device *dev)
ret = ufshcd_resume(hba);
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10446,6 +10563,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ init_completion(&hba->dev_cmd.complete);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 580c8d0bd8bb..191fbd799ec5 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -142,3 +142,15 @@ config SCSI_UFS_SPRD
Select this if you have UFS controller on Unisoc chipset.
If unsure, say N.
+
+config SCSI_UFS_ROCKCHIP
+ tristate "Rockchip UFS host controller driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_ROCKCHIP || COMPILE_TEST)
+ help
+ This selects the Rockchip-specific additions to the UFSHCD platform
+ driver. The UFS host on Rockchip SoCs needs vendor-specific
+ configuration before accessing the hardware, including PHY setup and
+ vendor-specific registers.
+
+ Select this if you have a UFS controller on a Rockchip chipset.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 4573aead02eb..2f97feb5db3f 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 13dd5dfc03eb..3e545af536e5 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -34,7 +34,7 @@
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -92,11 +92,16 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE BIT(2)
-#define UFS_RD_SHARABLE BIT(1)
-#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET 0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE (UFS_EXYNOSAUTO_WR_SHARABLE | \
+ UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE BIT(1)
+#define UFS_GS101_RD_SHARABLE BIT(0)
+#define UFS_GS101_SHARABLE (UFS_GS101_WR_SHARABLE | \
+ UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
/* Multi-host registers */
#define MHCTRL 0xC4
@@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs)
/* IO Coherency setting */
if (ufs->sysreg) {
return regmap_update_bits(ufs->sysreg,
- ufs->shareability_reg_offset,
- UFS_SHARABLE, UFS_SHARABLE);
+ ufs->iocc_offset,
+ ufs->iocc_mask, ufs->iocc_val);
}
return 0;
@@ -321,7 +326,7 @@ static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u32 enabled_vh;
@@ -396,7 +401,7 @@ static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
@@ -813,7 +818,7 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -865,7 +870,7 @@ out:
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_req)
+ const struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
@@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
}
phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+
+ if (generic_phy->power_count) {
+ phy_power_off(generic_phy);
+ phy_exit(generic_phy);
+ }
+
ret = phy_init(generic_phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
ufs->sysreg = NULL;
else {
if (of_property_read_u32_index(np, "samsung,sysreg", 1,
- &ufs->shareability_reg_offset)) {
+ &ufs->iocc_offset)) {
dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
- ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
}
}
+ ufs->iocc_mask = ufs->drv_data->iocc_mask;
+ /*
+ * no 'dma-coherent' property means the descriptors are
+ * non-cacheable so iocc shareability should be disabled.
+ */
+ if (of_dma_is_coherent(dev->of_node))
+ ufs->iocc_val = ufs->iocc_mask;
+ else
+ ufs->iocc_val = 0;
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -1320,6 +1346,7 @@ static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
return;
}
profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = hba->dev;
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
DATA_UNIT_SIZE;
@@ -1366,7 +1393,7 @@ static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba,
void *prdt, unsigned int num_segments)
{
struct fmp_sg_entry *fmp_prdt = prdt;
- const u8 *enckey = crypt_ctx->bc_key->raw;
+ const u8 *enckey = crypt_ctx->bc_key->bytes;
const u8 *twkey = enckey + AES_KEYSIZE_256;
u64 dun_lo = crypt_ctx->bc_dun[0];
u64 dun_hi = crypt_ctx->bc_dun[1];
@@ -1496,6 +1523,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1634,7 +1669,7 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1666,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1674,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1951,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -1989,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
ufshcd_pltfrm_remove(pdev);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -2034,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .iocc_mask = UFS_EXYNOSAUTO_SHARABLE,
.drv_init = exynosauto_ufs_drv_init,
.post_hce_enable = exynosauto_ufs_post_hce_enable,
.pre_link = exynosauto_ufs_pre_link,
@@ -2135,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_UFSPR_SECURE |
EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+ .iocc_mask = UFS_GS101_SHARABLE,
.drv_init = gs101_ufs_drv_init,
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
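
The shareability setup now funnels into a single regmap_update_bits(ufs->sysreg, ufs->iocc_offset, ufs->iocc_mask, ufs->iocc_val) call, where iocc_val equals iocc_mask when the node carries 'dma-coherent' and 0 otherwise. What that read-modify-write amounts to, in plain C:

#include <stdint.h>
#include <stdio.h>

/* What regmap_update_bits() boils down to for one register. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mask = 0x3;	/* e.g. UFS_GS101_SHARABLE: BIT(1) | BIT(0) */
	uint32_t sysreg = 0xf0;	/* whatever the register held before */

	printf("dma-coherent:  %#x\n", update_bits(sysreg, mask, mask));
	printf("non-coherent:  %#x\n", update_bits(sysreg, mask, 0));
	return 0;
}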
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 9670dc138d1e..abe7e472759e 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -181,6 +181,7 @@ struct exynos_ufs_drv_data {
struct exynos_ufs_uic_attr *uic_attr;
unsigned int quirks;
unsigned int opts;
+ u32 iocc_mask;
/* SoC's specific operations */
int (*drv_init)(struct exynos_ufs *ufs);
int (*pre_link)(struct exynos_ufs *ufs);
@@ -188,9 +189,10 @@ struct exynos_ufs_drv_data {
int (*pre_pwr_change)(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr);
int (*post_pwr_change)(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr);
+ const struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
+ int (*suspend)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
@@ -230,7 +232,9 @@ struct exynos_ufs {
ktime_t entry_hibern8_t;
const struct exynos_ufs_drv_data *drv_data;
struct regmap *sysreg;
- u32 shareability_reg_offset;
+ u32 iocc_offset;
+ u32 iocc_mask;
+ u32 iocc_val;
u32 opts;
#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 6e6569de74d8..6f2e6bf31225 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -361,9 +361,9 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
}
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_host_params host_params;
int ret = 0;
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 135cd78109e2..182f58d0c9db 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1081,8 +1081,8 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
}
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_host_params host_params;
@@ -1134,9 +1134,9 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status stage,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1643,6 +1643,7 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..c0761ccc1381 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/time.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
#include <soc/qcom/ice.h>
@@ -31,6 +33,10 @@
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB 0x04
+#define NO_DEEMPHASIS 0x0
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -97,7 +103,7 @@ static const struct __ufs_qcom_bw_table {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -105,6 +111,26 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
+/**
+ * ufs_qcom_config_ice_allocator() - ICE core allocator configuration
+ *
+ * @host: pointer to qcom specific variant structure.
+ */
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+ struct ufs_hba *hba = host->hba;
+ static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };
+ u32 config;
+
+ if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) ||
+ !(host->hba->caps & UFSHCD_CAP_CRYPTO))
+ return;
+
+ config = get_unaligned_le32(val);
+
+ ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG);
+ ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE);
+}
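
Note on the packing above: get_unaligned_le32() interprets val[] as a little-endian 32-bit word, so NUM_RX_R1W0 lands in bits [7:0], NUM_TX_R0W1 in bits [15:8], NUM_RX_R1W1 in bits [23:16] and NUM_TX_R1W1 in bits [31:24]. A minimal standalone sketch (plain C, not driver code; the core counts are the values defined in ufs-qcom.h):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Same byte order the driver hands to get_unaligned_le32() */
	const uint8_t val[4] = { 28, 28, 15, 13 }; /* R1W0, R0W1, R1W1 RX, R1W1 TX */
	uint32_t config = val[0] | (uint32_t)val[1] << 8 |
			  (uint32_t)val[2] << 16 | (uint32_t)val[3] << 24;

	printf("config = 0x%08x\n", (unsigned)config); /* prints 0x0d0f1c1c */
	return 0;
}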
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -125,7 +151,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -147,6 +173,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = dev;
/*
@@ -202,7 +229,7 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
err = qcom_ice_program_key(host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->raw,
+ key->bytes,
key->crypto_cfg.data_unit_size / 512,
slot);
ufshcd_release(hba);
@@ -248,6 +275,11 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
return 0;
}
+
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+}
+
#endif
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
@@ -496,6 +528,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
ufs_qcom_ice_enable(host);
+ ufs_qcom_config_ice_allocator(host);
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -509,16 +542,10 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
* ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
*
* @hba: host controller instance
- * @gear: Current operating gear
- * @hs: current power mode
- * @rate: current operating rate (A or B)
- * @update_link_startup_timer: indicate if link_start ongoing
 * @is_pre_scale_up: flag indicating a pre scale-up condition.
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer,
- bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
@@ -534,11 +561,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- return -EINVAL;
- }
-
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
if (is_pre_scale_up)
@@ -574,14 +596,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -778,9 +799,26 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
}
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+ u32 equalizer_val;
+ int ret, i;
+
+ /* Determine the equalizer value based on the gear */
+ equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+ for (i = 0; i < tx_lanes; i++) {
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+ equalizer_val);
+ if (ret)
+ dev_err(hba->dev, "%s: failed to set equalizer for lane %d\n",
+ __func__, i);
+ }
+}
+
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -829,11 +867,14 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+ ufs_qcom_set_tx_hs_equalizer(hba,
+ dev_req_params->gear_tx, dev_req_params->lane_tx);
+
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
/*
@@ -878,6 +919,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
(pa_vs_config_reg1 | (1 << 12)));
}
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+ PA_TX_HSG1_SYNC_LENGTH_VAL);
+ if (err)
+ dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
@@ -885,6 +936,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+ ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
return err;
}
@@ -899,6 +953,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_WDC,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+ { .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+ .model = UFS_ANY_MODEL,
+ .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+ UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
{}
};
@@ -989,6 +1047,14 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba)
host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
}
+static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major >= 0x5)
+ host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
+}
+
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
@@ -997,6 +1063,8 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ ufs_qcom_set_host_caps(hba);
}
/**
@@ -1292,7 +1360,7 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
@@ -1306,10 +1374,11 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
!strcmp(clki->name, "core_clk_unipro")) {
if (!clki->max_freq)
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (is_scale_up)
- cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+ else if (freq == ULONG_MAX)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
else
- cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+ cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+
break;
}
}
@@ -1346,20 +1415,17 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
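
With the frequency-based interface, cycles_in_1us is simply the clock frequency rounded up to whole MHz, i.e. ceil(freq, HZ_PER_MHZ); e.g. 201500000 Hz yields 202, matching UNIPRO_CORE_CLK_FREQ_201_5_MHZ. A standalone sketch of the arithmetic (assuming ceil() here means round-up integer division, as the kernel helper does):

#include <stdio.h>

static unsigned long ceil_div(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d; /* round-up division */
}

int main(void)
{
	unsigned long freqs[] = { 37500000, 75000000, 100000000, 150000000,
				  201500000, 300000000, 403000000 };

	for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%lu Hz -> %lu cycles per us\n", freqs[i],
		       ceil_div(freqs[i], 1000 * 1000));
	return 0;
}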
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int ret;
- ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
- attr->hs_rate, false, true);
+ ret = ufs_qcom_cfg_timers(hba, true);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, true);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1388,14 +1454,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
return err;
}
-static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, false);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
-static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
+static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1409,7 +1476,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err)
return err;
if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
+ err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
@@ -1421,7 +1488,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
- err = ufs_qcom_clk_scale_down_post_change(hba);
+ err = ufs_qcom_clk_scale_down_post_change(hba, target_freq);
if (err) {
@@ -1855,6 +1922,36 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return ret;
}
+static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ u32 gear = 0;
+
+ switch (freq) {
+ case 403000000:
+ gear = UFS_HS_G5;
+ break;
+ case 300000000:
+ gear = UFS_HS_G4;
+ break;
+ case 201500000:
+ gear = UFS_HS_G3;
+ break;
+ case 150000000:
+ case 100000000:
+ gear = UFS_HS_G2;
+ break;
+ case 75000000:
+ case 37500000:
+ gear = UFS_HS_G1;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
+ break;
+ }
+
+ return gear;
+}
+
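A hypothetical caller sketch showing how a clock-scaling path could use the new freq_to_gear_speed vop to bound the gear for a target frequency; example_target_gear() and its fallback policy are illustrative, not the ufshcd core's actual API:

static u32 example_target_gear(struct ufs_hba *hba, unsigned long target_freq,
			       u32 max_gear)
{
	u32 gear = 0;

	if (hba->vops && hba->vops->freq_to_gear_speed)
		gear = hba->vops->freq_to_gear_speed(hba, target_freq);

	/* 0 means the vop did not recognize the frequency; keep the max */
	return gear ? min(gear, max_gear) : max_gear;
}
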
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1883,6 +1980,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.op_runtime_config = ufs_qcom_op_runtime_config,
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
+ .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
};
/**
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 919f53682beb..05d4cb569c50 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,9 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ REG_UFS_MEM_ICE_CONFIG = 0x260C,
+ REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
+
REG_UFS_CFG3 = 0x271C,
REG_UFS_DEBUG_SPARE_CFG = 0x284C,
@@ -110,14 +113,20 @@ enum {
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
+/* Host capability bit for the shared ICE configuration */
+#define UFS_QCOM_CAP_ICE_CONFIG BIT(0)
+
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH 0x1552
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
+#define TX_HS_EQUALIZER 0x0037
+
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define CLK_1US_CYCLES_MASK_V4 GENMASK(27, 16)
#define CLK_1US_CYCLES_MASK GENMASK(7, 0)
@@ -135,6 +144,52 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL 0x4A
+
+/*
+ * Some UFS device vendors need a different Tsync length.
+ * Enable this quirk to apply a vendor-specific TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH BIT(16)
+
+/*
+ * Some UFS device vendors need a different de-emphasis setting.
+ * Enable this quirk to tune the TX de-emphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING BIT(17)
+
+/* ICE allocator type to share AES engines between the TX and RX streams */
+#define ICE_ALLOCATOR_TYPE 2
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received and no write data block is in progress
+ */
+#define NUM_RX_R1W0 28
+
+/*
+ * Number of cores allocated to the TX stream when the device requests a
+ * write data block and no read data block is in progress
+ */
+#define NUM_TX_R0W1 28
+
+/*
+ * Number of cores allocated to the RX stream when a read data block is
+ * received while a write data block is in progress
+ * OR
+ * the device requests a write data block while a read data block is in progress
+ */
+#define NUM_RX_R1W1 15
+
+/*
+ * Number of cores allocated to the TX stream (UFS write) when a read data
+ * block is received while a write data block is in progress
+ * OR
+ * the device requests a write data block while a read data block is in progress
+ */
+#define NUM_TX_R1W1 13
+
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
u8 *major, u16 *minor, u16 *step)
@@ -196,7 +251,7 @@ struct ufs_qcom_host {
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
-
+ u32 caps;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index 03cd82db751b..5bf7d0e77ad8 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -9,324 +9,408 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sys_soc.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
+#define EFUSE_CALIB_SIZE 8
+
struct ufs_renesas_priv {
+ const struct firmware *fw;
+ void (*pre_init)(struct ufs_hba *hba);
bool initialized; /* The hardware needs initialization once */
+ u8 calib[EFUSE_CALIB_SIZE];
};
-enum {
- SET_PHY_INDEX_LO = 0,
- SET_PHY_INDEX_HI,
- TIMER_INDEX,
- MAX_INDEX
-};
+#define UFS_RENESAS_FIRMWARE_NAME "r8a779f0_ufs.bin"
+MODULE_FIRMWARE(UFS_RENESAS_FIRMWARE_NAME);
-enum ufs_renesas_init_param_mode {
- MODE_RESTORE,
- MODE_SET,
- MODE_SAVE,
- MODE_POLL,
- MODE_WAIT,
- MODE_WRITE,
-};
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
-#define PARAM_RESTORE(_reg, _index) \
- { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
-#define PARAM_SET(_index, _set) \
- { .mode = MODE_SET, .index = _index, .u.set = _set }
-#define PARAM_SAVE(_reg, _mask, _index) \
- { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
- .index = _index }
-#define PARAM_POLL(_reg, _expected, _mask) \
- { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
- .mask = (u32)(_mask) }
-#define PARAM_WAIT(_delay_us) \
- { .mode = MODE_WAIT, .u.delay_us = _delay_us }
-
-#define PARAM_WRITE(_reg, _val) \
- { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
-
-#define PARAM_WRITE_D0_D4(_d0, _d4) \
- PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
-
-#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_RESTORE_800_80C_POLL(_index) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE(0xd0, 0x00000800), \
- PARAM_RESTORE(0xd4, _index), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_828_82C_POLL(_data_828) \
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
- PARAM_WRITE_D0_D4(0x00000828, _data_828), \
- PARAM_WRITE(0xd0, 0x0000082c), \
- PARAM_POLL(0xd4, _data_828, _data_828)
-
-#define PARAM_WRITE_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_SET_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE_804_80C_POLL(0x1a, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
- PARAM_WRITE_804_80C_POLL(0x1b, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0), \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
- PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_POLL(0xd4, _expected, _mask), \
- PARAM_WRITE(0xf0, 0)
-
-struct ufs_renesas_init_param {
- enum ufs_renesas_init_param_mode mode;
- u32 reg;
- union {
- u32 expected;
- u32 delay_us;
- u32 set;
- u32 val;
- } u;
- u32 mask;
- u32 index;
-};
+static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask)
+{
+ int ret;
+ u32 val;
-/* This setting is for SERIES B */
-static const struct ufs_renesas_init_param ufs_param[] = {
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
-
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE(0xc0, 0x41584901),
-
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
- PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
- PARAM_WRITE(0xd0, 0x0000080c),
- PARAM_POLL(0xd4, BIT(8), BIT(8)),
-
- PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
-
- PARAM_WRITE(0xd0, 0x00000804),
- PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
-
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
- PARAM_WRITE(0xd4, 0x00000000),
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
- PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
- PARAM_WRITE(0xd0, 0x0000082c),
- PARAM_POLL(0xd4, BIT(27), BIT(27)),
- PARAM_WRITE(0xd0, 0x00000d2c),
- PARAM_POLL(0xd4, BIT(0), BIT(0)),
+ ret = readl_poll_timeout_atomic(hba->mmio_base + reg,
+ val, (val & mask) == expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, mask, expected);
+}
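
For reference, readl_poll_timeout_atomic(addr, val, cond, 10, 1000) as used here busy-waits in 10 us steps for up to 1000 us; roughly equivalent to the following open-coded loop (a sketch only — the real macro also performs a final read after the timeout expires):

u32 val;
int ret = -ETIMEDOUT, waited;

for (waited = 0; waited < 1000; waited += 10) {
	val = readl(hba->mmio_base + reg);
	if ((val & mask) == expected) {
		ret = 0;
		break;
	}
	udelay(10);
}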
- /* phy setup */
- PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
- PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
- PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
-
- PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
- PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
-
- PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
- PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
-
- PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
- PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
- PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
- PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
- PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
- PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
-
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
-
- PARAM_WRITE_PHY(0x0028, 0x0061),
- PARAM_WRITE_PHY(0x4014, 0x0061),
- PARAM_SET_PHY(0x401c, BIT(2)),
- PARAM_WRITE_PHY(0x4000, 0x0000),
- PARAM_WRITE_PHY(0x4001, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0001),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0002),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x0000),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x001a),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
- PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
- PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
- /* end of phy setup */
+static u32 ufs_renesas_read(struct ufs_hba *hba, u32 reg)
+{
+ return ufshcd_readl(hba, reg);
+}
- PARAM_WRITE(0xf0, 0),
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_RESTORE(0xd4, TIMER_INDEX),
-};
+static void ufs_renesas_write(struct ufs_hba *hba, u32 reg, u32 value)
+{
+ ufshcd_writel(hba, value, reg);
+}
-static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+static void ufs_renesas_write_d0_d4(struct ufs_hba *hba, u32 data_d0, u32 data_d4)
{
- ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+ ufs_renesas_write(hba, 0xd0, data_d0);
+ ufs_renesas_write(hba, 0xd4, data_d4);
}
-static void ufs_renesas_reg_control(struct ufs_hba *hba,
- const struct ufs_renesas_init_param *p)
+static void ufs_renesas_write_800_80c_poll(struct ufs_hba *hba, u32 addr,
+ u32 data_800)
{
- static u32 save[MAX_INDEX];
- int ret;
- u32 val;
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000800, (data_800 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_804_80c_poll(struct ufs_hba *hba, u32 addr, u32 data_804)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, (data_804 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_828_82c_poll(struct ufs_hba *hba, u32 data_828)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, data_828);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, data_828, data_828);
+}
+
+static void ufs_renesas_write_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x18, data16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x19, (data16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ u32 low, high;
+
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write_804_80c_poll(hba, 0x1a, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ low = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_804_80c_poll(hba, 0x1b, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ high = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+
+ data16 |= (high << 8) | low;
+ ufs_renesas_write_phy(hba, addr16, data16);
+}
+
+static void ufs_renesas_reset_indirect_write(struct ufs_hba *hba, int gpio,
+ u32 addr, u32 data)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data);
+}
+
+static void ufs_renesas_reset_indirect_update(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x0f000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27) | BIT(26) | BIT(24), BIT(27) | BIT(26) | BIT(24));
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 data_800)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data_800);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 expected, u32 mask)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ ufs_renesas_poll(hba, 0xd4, expected, mask);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba, bool init108)
+{
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000002);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000001);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000);
+
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write(hba, 0xc0, 0x41584901);
+}
+
+static void ufs_renesas_init_step4_to_6(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, 0x00000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
- WARN_ON(p->index >= MAX_INDEX);
-
- switch (p->mode) {
- case MODE_RESTORE:
- ufshcd_writel(hba, save[p->index], p->reg);
- break;
- case MODE_SET:
- save[p->index] |= p->u.set;
- break;
- case MODE_SAVE:
- save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
- break;
- case MODE_POLL:
- ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
- val,
- (val & p->mask) == p->u.expected,
- 10, 1000);
- if (ret)
- dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
- __func__, ret, val, p->mask, p->u.expected);
- break;
- case MODE_WAIT:
- if (p->u.delay_us > 1000)
- mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
- else
- udelay(p->u.delay_us);
- break;
- case MODE_WRITE:
- ufshcd_writel(hba, p->u.val, p->reg);
- break;
- default:
- break;
+ ufs_renesas_write(hba, REG_CONTROLLER_ENABLE, 0x00000001);
+
+ ufs_renesas_write(hba, 0xd0, 0x00000804);
+ ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0));
+}
+
+static u32 ufs_renesas_init_disable_timer(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ timer_val = ufs_renesas_read(hba, 0xd4) & 0x0000ffff;
+ ufs_renesas_write(hba, 0xd4, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x08000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27), BIT(27));
+ ufs_renesas_write(hba, 0xd0, 0x00000d2c);
+ ufs_renesas_poll(hba, 0xd4, BIT(0), BIT(0));
+
+ return timer_val;
+}
+
+static void ufs_renesas_init_enable_timer(struct ufs_hba *hba, u32 timer_val)
+{
+ ufs_renesas_write(hba, 0xf0, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ ufs_renesas_write(hba, 0xd4, timer_val);
+}
+
+static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba,
+ u32 data_10ad, u32 data_10af)
+{
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ad, data_10ad);
+ ufs_renesas_write_phy(hba, 0x10af, data_10af);
+ ufs_renesas_write_phy(hba, 0x10b6, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0000);
+}
+
+static void ufs_renesas_init_compensation_and_slicers(struct ufs_hba *hba)
+{
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a);
+}
+
+static void ufs_renesas_r8a779f0_es10_pre_init(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ /* phy setup */
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x5f, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_indirect_write(hba, 7, 0x5c, 0x0003);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, 0x0061);
+ ufs_renesas_indirect_write(hba, 4, 0x9b, 0x0009);
+ ufs_renesas_indirect_write(hba, 4, 0xa6, 0x0005);
+ ufs_renesas_indirect_write(hba, 4, 0xa5, 0x0058);
+ ufs_renesas_indirect_write(hba, 1, 0x39, 0x0027);
+ ufs_renesas_indirect_write(hba, 1, 0x47, 0x004c);
+
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_write_phy(hba, 0x0028, 0x0061);
+ ufs_renesas_write_phy(hba, 0x4014, 0x0061);
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+ ufs_renesas_write_phy(hba, 0x4000, 0x0000);
+ ufs_renesas_write_phy(hba, 0x4001, 0x0000);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x72, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x73, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x74, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x75, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x76, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x77, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x78, 0x00ff);
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+
+ ufs_renesas_indirect_write(hba, 7, 0x19, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x1a, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+ /* end of phy setup */
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
+}
+
+static void ufs_renesas_r8a779f0_init_step3_add(struct ufs_hba *hba, bool assert)
+{
+ u32 val_2x = 0, val_3x = 0, val_4x = 0;
+
+ if (assert) {
+ val_2x = 0x0001;
+ val_3x = 0x0003;
+ val_4x = 0x0001;
}
+
+ ufs_renesas_reset_indirect_write(hba, 7, 0x20, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4a, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x35, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x21, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4b, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x36, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
}
-static void ufs_renesas_pre_init(struct ufs_hba *hba)
+static void ufs_renesas_r8a779f0_pre_init(struct ufs_hba *hba)
{
- const struct ufs_renesas_init_param *p = ufs_param;
- unsigned int i;
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+ u32 timer_val;
+ u32 data;
+ int i;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, true);
+
+ ufs_renesas_r8a779f0_init_step3_add(hba, true);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5f, 0x0063);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5c, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_r8a779f0_init_step3_add(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, priv->calib[2]);
+ ufs_renesas_indirect_write(hba, 1, 0x4e, priv->calib[3]);
+ ufs_renesas_indirect_write(hba, 1, 0x0d, 0x0006);
+ ufs_renesas_indirect_write(hba, 1, 0x0e, 0x0007);
+ ufs_renesas_write_phy(hba, 0x0028, priv->calib[3]);
+ ufs_renesas_write_phy(hba, 0x4014, priv->calib[3]);
+
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+
+ ufs_renesas_write_phy(hba, 0x4000, priv->calib[6]);
+ ufs_renesas_write_phy(hba, 0x4001, priv->calib[7]);
+
+ ufs_renesas_indirect_write(hba, 1, 0x14, 0x0001);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x00c0);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0001);
+
+ for (i = 0; i < priv->fw->size / 2; i++) {
+ data = (priv->fw->data[i * 2 + 1] << 8) | priv->fw->data[i * 2];
+ ufs_renesas_write_phy(hba, 0xc000 + i, data);
+ }
- for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
- ufs_renesas_reg_control(hba, &p[i]);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
}
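
The firmware download loop above assembles each 16-bit PHY word from two little-endian bytes by hand; an equivalent form using the kernel's get_unaligned_le16() helper would be (a sketch — it would also need <linux/unaligned.h>, which this file does not include):

for (i = 0; i < priv->fw->size / 2; i++)
	ufs_renesas_write_phy(hba, 0xc000 + i,
			      get_unaligned_le16(priv->fw->data + i * 2));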
static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
@@ -338,7 +422,7 @@ static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
return 0;
if (status == PRE_CHANGE)
- ufs_renesas_pre_init(hba);
+ priv->pre_init(hba);
priv->initialized = true;
@@ -356,20 +440,78 @@ static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
return 0;
}
+static const struct soc_device_attribute ufs_fallback[] = {
+ { .soc_id = "r8a779f0", .revision = "ES1.[01]" },
+ { /* Sentinel */ }
+};
+
static int ufs_renesas_init(struct ufs_hba *hba)
{
+ const struct soc_device_attribute *attr;
+ struct nvmem_cell *cell = NULL;
+ struct device *dev = hba->dev;
struct ufs_renesas_priv *priv;
+ u8 *data = NULL;
+ size_t len;
+ int ret;
- priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;
+ attr = soc_device_match(ufs_fallback);
+ if (attr)
+ goto fallback;
+
+ ret = request_firmware(&priv->fw, UFS_RENESAS_FIRMWARE_NAME, dev);
+ if (ret) {
+ dev_warn(dev, "Failed to load firmware\n");
+ goto fallback;
+ }
+
+ cell = nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(cell)) {
+ dev_warn(dev, "No calibration data specified\n");
+ goto fallback;
+ }
+
+ data = nvmem_cell_read(cell, &len);
+ if (IS_ERR(data)) {
+ dev_warn(dev, "Failed to read calibration data: %pe\n", data);
+ goto fallback;
+ }
+
+ if (len != EFUSE_CALIB_SIZE) {
+ dev_warn(dev, "Invalid calibration data size %zu\n", len);
+ goto fallback;
+ }
+
+ memcpy(priv->calib, data, EFUSE_CALIB_SIZE);
+ priv->pre_init = ufs_renesas_r8a779f0_pre_init;
+ goto out;
+
+fallback:
+ dev_info(dev, "Using ES1.0 init code\n");
+ priv->pre_init = ufs_renesas_r8a779f0_es10_pre_init;
+
+out:
+ kfree(data);
+ if (!IS_ERR_OR_NULL(cell))
+ nvmem_cell_put(cell);
+
return 0;
}
+static void ufs_renesas_exit(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ release_firmware(priv->fw);
+}
+
static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
{
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
@@ -378,6 +520,7 @@ static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops ufs_renesas_vops = {
.name = "renesas",
.init = ufs_renesas_init,
+ .exit = ufs_renesas_exit,
.set_dma_mask = ufs_renesas_set_dma_mask,
.setup_clocks = ufs_renesas_setup_clocks,
.hce_enable_notify = ufs_renesas_hce_enable_notify,
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
new file mode 100644
index 000000000000..8754085dd0cc
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_wakeup.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-rockchip.h"
+
+static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ if (status == POST_CHANGE) {
+ err = ufshcd_dme_reset(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_enable(hba);
+ if (err)
+ return err;
+
+ return ufshcd_vops_phy_initialization(hba);
+ }
+
+ return 0;
+}
+
+static void ufs_rockchip_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_5;
+ hba->spm_lvl = UFS_PM_LVL_5;
+}
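
For context on why level 5 is chosen as the default: the ufshcd core maps each PM level to a (device power mode, link state) pair. Paraphrased here from ufshcd's ufs_pm_lvl_states[] table (consult ufshcd.c for the authoritative mapping):

/*
 *   UFS_PM_LVL_0: device ACTIVE,    link ACTIVE
 *   UFS_PM_LVL_1: device ACTIVE,    link HIBERN8
 *   UFS_PM_LVL_2: device SLEEP,     link ACTIVE
 *   UFS_PM_LVL_3: device SLEEP,     link HIBERN8
 *   UFS_PM_LVL_4: device POWERDOWN, link HIBERN8
 *   UFS_PM_LVL_5: device POWERDOWN, link OFF
 *   UFS_PM_LVL_6: device DEEPSLEEP, link OFF
 */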
+
+static int ufs_rockchip_rk3576_phy_init(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(PA_LOCAL_TX_LCC_ENABLE, 0x0), 0x0);
+ /* enable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE);
+ for (int i = 0; i < 2; i++) {
+ /* Configuration M - TX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, SEL_TX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_VALUE, SEL_TX_LANE0 + i), 0x44);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, SEL_TX_LANE0 + i), 0xe6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, SEL_TX_LANE0 + i), 0x07);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_TASE_VALUE, SEL_TX_LANE0 + i), 0x93);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_BASE_NVALUE, SEL_TX_LANE0 + i), 0xc9);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_POWER_SAVING_CTRL, SEL_TX_LANE0 + i), 0x00);
+ /* Configuration M - RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, SEL_RX_LANE0 + i), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE, SEL_RX_LANE0 + i), 0x58);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE1, SEL_RX_LANE0 + i), 0x8c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE2, SEL_RX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_OPTION, SEL_RX_LANE0 + i), 0xf6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_POWER_SAVING_CTRL, SEL_RX_LANE0 + i), 0x69);
+ }
+
+ /* disable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE);
+
+ ufs_sys_writel(host->mphy_base, 0x80, CMN_REG23);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV0_REG14);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV1_REG14);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG15);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG15);
+
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV0_REG08);
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV1_REG08);
+
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV0_REG29);
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV1_REG29);
+
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV0_REG2E);
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV1_REG2E);
+
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV0_REG3C);
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV1_REG3C);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG16);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG16);
+
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV0_REG17);
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV1_REG17);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV0_REG18);
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV1_REG18);
+
+ ufs_sys_writel(host->mphy_base, 0x03, CMN_REG25);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG3D);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG3D);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, CMN_REG23);
+ udelay(1);
+ ufs_sys_writel(host->mphy_base, 0x00, CMN_REG23);
+
+ usleep_range(200, 250);
+ /* start link up */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1);
+
+ return 0;
+}
+
+static int ufs_rockchip_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_rockchip_host *host;
+ int err;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->ufs_sys_ctrl = devm_platform_ioremap_resource_byname(pdev, "hci_grf");
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_sys_ctrl),
+ "Failed to map HCI system control registers\n");
+
+ host->ufs_phy_ctrl = devm_platform_ioremap_resource_byname(pdev, "mphy_grf");
+ if (IS_ERR(host->ufs_phy_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_phy_ctrl),
+ "Failed to map mphy system control registers\n");
+
+ host->mphy_base = devm_platform_ioremap_resource_byname(pdev, "mphy");
+ if (IS_ERR(host->mphy_base))
+ return dev_err_probe(dev, PTR_ERR(host->mphy_base),
+ "Failed to map mphy base registers\n");
+
+ host->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(host->rst))
+ return dev_err_probe(dev, PTR_ERR(host->rst),
+ "failed to get reset control\n");
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
+ if (IS_ERR(host->ref_out_clk))
+ return dev_err_probe(dev, PTR_ERR(host->ref_out_clk),
+ "ref_out clock unavailable\n");
+
+ host->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(host->rst_gpio))
+ return dev_err_probe(dev, PTR_ERR(host->rst_gpio),
+ "failed to get reset gpio\n");
+
+ err = devm_clk_bulk_get_all_enabled(dev, &host->clks);
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
+
+ host->hba = hba;
+
+ ufshcd_set_variant(hba, host);
+
+ return 0;
+}
+
+static int ufs_rockchip_rk3576_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ hba->quirks = UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+ /* Enable BKOPS during suspend */
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ /* Enable putting device into deep sleep */
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ /* Enable devfreq of UFS */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Set the default desired pm level in case no users set via sysfs */
+ ufs_rockchip_set_pm_lvl(hba);
+
+ ret = ufs_rockchip_common_init(hba);
+ if (ret)
+ return dev_err_probe(dev, ret, "ufs common init fail\n");
+
+ return 0;
+}
+
+static int ufs_rockchip_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 1);
+ usleep_range(20, 25);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 0);
+ usleep_range(20, 25);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_rk3576_vops = {
+ .name = "rk3576",
+ .init = ufs_rockchip_rk3576_init,
+ .device_reset = ufs_rockchip_device_reset,
+ .hce_enable_notify = ufs_rockchip_hce_enable_notify,
+ .phy_initialization = ufs_rockchip_rk3576_phy_init,
+};
+
+static const struct of_device_id ufs_rockchip_of_match[] = {
+ { .compatible = "rockchip,rk3576-ufshc", .data = &ufs_hba_rk3576_vops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ufs_rockchip_of_match);
+
+static int ufs_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops;
+ int err;
+
+ vops = device_get_match_data(dev);
+ if (!vops)
+ return dev_err_probe(dev, -ENODATA, "ufs_hba_variant_ops not defined.\n");
+
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init failed\n");
+
+ return 0;
+}
+
+static void ufs_rockchip_remove(struct platform_device *pdev)
+{
+ ufshcd_pltfrm_remove(pdev);
+}
+
+#ifdef CONFIG_PM
+static int ufs_rockchip_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ /* Do not power down the genpd if rpm_lvl is less than level 5 */
+ dev_pm_genpd_rpm_always_on(dev, hba->rpm_lvl < UFS_PM_LVL_5);
+
+ return ufshcd_runtime_suspend(dev);
+}
+
+static int ufs_rockchip_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ return ufshcd_runtime_resume(dev);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ufs_rockchip_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ /*
+ * If spm_lvl is less than level 5, the host controller must stay
+ * powered on, so call device_set_awake_path() to have the PM core
+ * notify the genpd provider of this requirement.
+ */
+ if (hba->spm_lvl < UFS_PM_LVL_5)
+ device_set_awake_path(dev);
+
+ err = ufshcd_system_suspend(dev);
+ if (err) {
+ dev_err(hba->dev, "UFSHCD system suspend failed %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ return 0;
+}
+
+static int ufs_rockchip_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops ufs_rockchip_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_rockchip_system_suspend, ufs_rockchip_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_rockchip_runtime_suspend, ufs_rockchip_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_rockchip_pltform = {
+ .probe = ufs_rockchip_probe,
+ .remove = ufs_rockchip_remove,
+ .driver = {
+ .name = "ufshcd-rockchip",
+ .pm = &ufs_rockchip_pm_ops,
+ .of_match_table = ufs_rockchip_of_match,
+ },
+};
+module_platform_driver(ufs_rockchip_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Rockchip UFS Host Driver");
diff --git a/drivers/ufs/host/ufs-rockchip.h b/drivers/ufs/host/ufs-rockchip.h
new file mode 100644
index 000000000000..3ba6fb9f73ae
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _UFS_ROCKCHIP_H_
+#define _UFS_ROCKCHIP_H_
+
+#define SEL_TX_LANE0 0x0
+#define SEL_TX_LANE1 0x1
+#define SEL_TX_LANE2 0x2
+#define SEL_TX_LANE3 0x3
+#define SEL_RX_LANE0 0x4
+#define SEL_RX_LANE1 0x5
+#define SEL_RX_LANE2 0x6
+#define SEL_RX_LANE3 0x7
+
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE2 0xAB
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_VALUE 0xAD
+#define VND_TX_BASE_NVALUE 0x93
+#define VND_TX_TASE_VALUE 0x94
+#define VND_TX_POWER_SAVING_CTRL 0x7F
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_PVALUE2 0x1B
+#define VND_RX_LINERESET_PVALUE1 0x1C
+#define VND_RX_LINERESET_VALUE 0x1D
+#define VND_RX_LINERESET_OPTION 0x25
+#define VND_RX_POWER_SAVING_CTRL 0x2F
+#define VND_RX_SAVE_DET_CTRL 0x1E
+
+#define CMN_REG23 0x8C
+#define CMN_REG25 0x94
+#define TRSV0_REG08 0xE0
+#define TRSV1_REG08 0x220
+#define TRSV0_REG14 0x110
+#define TRSV1_REG14 0x250
+#define TRSV0_REG15 0x134
+#define TRSV1_REG15 0x274
+#define TRSV0_REG16 0x128
+#define TRSV1_REG16 0x268
+#define TRSV0_REG17 0x12C
+#define TRSV1_REG17 0x26c
+#define TRSV0_REG18 0x120
+#define TRSV1_REG18 0x260
+#define TRSV0_REG29 0x164
+#define TRSV1_REG29 0x2A4
+#define TRSV0_REG2E 0x178
+#define TRSV1_REG2E 0x2B8
+#define TRSV0_REG3C 0x1B0
+#define TRSV1_REG3C 0x2F0
+#define TRSV0_REG3D 0x1B4
+#define TRSV1_REG3D 0x2F4
+
+#define MPHY_CFG 0x200
+#define MPHY_CFG_ENABLE 0x40
+#define MPHY_CFG_DISABLE 0x0
+
+#define MIB_T_DBG_CPORT_TX_ENDIAN 0xc022
+#define MIB_T_DBG_CPORT_RX_ENDIAN 0xc023
+
+struct ufs_rockchip_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_phy_ctrl;
+ void __iomem *ufs_sys_ctrl;
+ void __iomem *mphy_base;
+ struct gpio_desc *rst_gpio;
+ struct reset_control *rst;
+ struct clk *ref_out_clk;
+ struct clk_bulk_data *clks;
+ uint64_t caps;
+};
+
+#define ufs_sys_writel(base, val, reg) \
+ writel((val), (base) + (reg))
+#define ufs_sys_readl(base, reg) readl((base) + (reg))
+#define ufs_sys_set_bits(base, mask, reg) \
+ ufs_sys_writel( \
+ (base), ((mask) | (ufs_sys_readl((base), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(base, mask, reg) \
+ ufs_sys_writel((base), \
+ ((~(mask)) & (ufs_sys_readl((base), (reg)))), \
+ (reg))
+
+#endif /* _UFS_ROCKCHIP_H_ */
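
A usage sketch for the accessor macros above (illustrative values only; host and val are assumed to be in scope):

u32 val;

ufs_sys_writel(host->mphy_base, 0x80, CMN_REG23);          /* plain write */
val = ufs_sys_readl(host->mphy_base, CMN_REG23);           /* read back */
ufs_sys_set_bits(host->mphy_base, BIT(6), CMN_REG23);      /* OR in BIT(6) */
ufs_sys_ctrl_clr_bits(host->mphy_base, BIT(6), CMN_REG23); /* clear it again */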
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index b1d532363f9d..65bd8fb96b99 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -160,9 +160,9 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
}
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 9cfcaad23cf9..996387906aa1 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -157,7 +157,7 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;