Diffstat (limited to 'drivers/ufs/core')

-rw-r--r--  drivers/ufs/core/ufs-mcq.c        |  12
-rw-r--r--  drivers/ufs/core/ufs-sysfs.c      |  64
-rw-r--r--  drivers/ufs/core/ufs_trace.h      | 135
-rw-r--r--  drivers/ufs/core/ufshcd-crypto.c  |   7
-rw-r--r--  drivers/ufs/core/ufshcd-priv.h    |  22
-rw-r--r--  drivers/ufs/core/ufshcd.c         | 271

6 files changed, 353 insertions(+), 158 deletions(-)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 240ce135bbfb..f1294c29f484 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	unsigned long flags;
 	int err;
 
-	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
-		dev_err(hba->dev,
-			"%s: skip abort. cmd at tag %d already completed.\n",
-			__func__, tag);
-		return FAILED;
-	}
-
 	/* Skip task abort in case previous aborts failed and report failure */
 	if (lrbp->req_abort_skip) {
 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	}
 
 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	if (!hwq) {
+		dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+			__func__, tag);
+		return FAILED;
+	}
 
 	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
 		/*
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3438269a5440..634cf163f4cb 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -458,6 +458,64 @@ static ssize_t pm_qos_enable_store(struct device *dev,
 	return count;
 }
 
+static ssize_t critical_health_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return sysfs_emit(buf, "%d\n", hba->critical_health_count);
+}
+
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned int value;
+
+	if (kstrtouint(buf, 0, &value))
+		return -EINVAL;
+
+	/* the only supported usecase is to reset the dev_lvl_exception_count */
+	if (value)
+		return -EINVAL;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+
+	return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u64 exception_id;
+	int err;
+
+	ufshcd_rpm_get_sync(hba);
+	err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+	ufshcd_rpm_put_sync(hba);
+
+	if (err)
+		return err;
+
+	hba->dev_lvl_exception_id = exception_id;
+	return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
 static DEVICE_ATTR_RW(rpm_lvl);
 static DEVICE_ATTR_RO(rpm_target_dev_state);
 static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -470,6 +528,9 @@ static DEVICE_ATTR_RW(enable_wb_buf_flush);
 static DEVICE_ATTR_RW(wb_flush_threshold);
 static DEVICE_ATTR_RW(rtc_update_ms);
 static DEVICE_ATTR_RW(pm_qos_enable);
+static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
 
 static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_rpm_lvl.attr,
@@ -484,6 +545,9 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_wb_flush_threshold.attr,
 	&dev_attr_rtc_update_ms.attr,
 	&dev_attr_pm_qos_enable.attr,
+	&dev_attr_critical_health.attr,
+	&dev_attr_device_lvl_exception_count.attr,
+	&dev_attr_device_lvl_exception_id.attr,
 	NULL
 };
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 84deca2b841d..caa32e23ffa5 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -83,34 +83,34 @@ UFS_CMD_TRACE_TSF_TYPES
 TRACE_EVENT(ufshcd_clk_gating,
 
-	TP_PROTO(const char *dev_name, int state),
+	TP_PROTO(struct ufs_hba *hba, int state),
 
-	TP_ARGS(dev_name, state),
+	TP_ARGS(hba, state),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__field(int, state)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__entry->state = state;
 	),
 
 	TP_printk("%s: gating state changed to %s",
-		__get_str(dev_name),
+		dev_name(__entry->hba->dev),
 		__print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
 );
 
 TRACE_EVENT(ufshcd_clk_scaling,
 
-	TP_PROTO(const char *dev_name, const char *state, const char *clk,
+	TP_PROTO(struct ufs_hba *hba, const char *state, const char *clk,
 		u32 prev_state, u32 curr_state),
 
-	TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+	TP_ARGS(hba, state, clk, prev_state, curr_state),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__string(state, state)
 		__string(clk, clk)
 		__field(u32, prev_state)
@@ -118,7 +118,7 @@ TRACE_EVENT(ufshcd_clk_scaling,
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__assign_str(state);
 		__assign_str(clk);
 		__entry->prev_state = prev_state;
@@ -126,80 +126,80 @@ TRACE_EVENT(ufshcd_clk_scaling,
 	),
 
 	TP_printk("%s: %s %s from %u to %u Hz",
-		__get_str(dev_name), __get_str(state), __get_str(clk),
+		dev_name(__entry->hba->dev), __get_str(state), __get_str(clk),
 		__entry->prev_state, __entry->curr_state)
 );
 
 TRACE_EVENT(ufshcd_auto_bkops_state,
 
-	TP_PROTO(const char *dev_name, const char *state),
+	TP_PROTO(struct ufs_hba *hba, const char *state),
 
-	TP_ARGS(dev_name, state),
+	TP_ARGS(hba, state),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__string(state, state)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__assign_str(state);
 	),
 
 	TP_printk("%s: auto bkops - %s",
-		__get_str(dev_name), __get_str(state))
+		dev_name(__entry->hba->dev), __get_str(state))
 );
 
 DECLARE_EVENT_CLASS(ufshcd_profiling_template,
-	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+	TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
 		 int err),
 
-	TP_ARGS(dev_name, profile_info, time_us, err),
+	TP_ARGS(hba, profile_info, time_us, err),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__string(profile_info, profile_info)
 		__field(s64, time_us)
 		__field(int, err)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__assign_str(profile_info);
 		__entry->time_us = time_us;
 		__entry->err = err;
 	),
 
 	TP_printk("%s: %s: took %lld usecs, err %d",
-		__get_str(dev_name), __get_str(profile_info),
+		dev_name(__entry->hba->dev), __get_str(profile_info),
 		__entry->time_us, __entry->err)
 );
 
 DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
-	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+	TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
 		 int err),
-	TP_ARGS(dev_name, profile_info, time_us, err));
+	TP_ARGS(hba, profile_info, time_us, err));
 
 DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
-	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+	TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
 		 int err),
-	TP_ARGS(dev_name, profile_info, time_us, err));
+	TP_ARGS(hba, profile_info, time_us, err));
 
 DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
-	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+	TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
 		 int err),
-	TP_ARGS(dev_name, profile_info, time_us, err));
+	TP_ARGS(hba, profile_info, time_us, err));
 
 DECLARE_EVENT_CLASS(ufshcd_template,
-	TP_PROTO(const char *dev_name, int err, s64 usecs,
+	TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		 int dev_state, int link_state),
 
-	TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+	TP_ARGS(hba, err, usecs, dev_state, link_state),
 
 	TP_STRUCT__entry(
 		__field(s64, usecs)
 		__field(int, err)
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__field(int, dev_state)
 		__field(int, link_state)
 	),
@@ -207,14 +207,14 @@ DECLARE_EVENT_CLASS(ufshcd_template,
 	TP_fast_assign(
 		__entry->usecs = usecs;
 		__entry->err = err;
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__entry->dev_state = dev_state;
 		__entry->link_state = link_state;
 	),
 
 	TP_printk(
 		"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
-		__get_str(dev_name),
+		dev_name(__entry->hba->dev),
 		__entry->usecs,
 		__print_symbolic(__entry->dev_state, UFS_PWR_MODES),
 		__print_symbolic(__entry->link_state, UFS_LINK_STATES),
@@ -223,60 +223,62 @@ DECLARE_EVENT_CLASS(ufshcd_template,
 );
 
 DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_init,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
-	     TP_PROTO(const char *dev_name, int err, s64 usecs,
+	     TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
 		      int dev_state, int link_state),
-	     TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+	     TP_ARGS(hba, err, usecs, dev_state, link_state));
 
 TRACE_EVENT(ufshcd_command,
-	TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+	TP_PROTO(struct scsi_device *sdev, struct ufs_hba *hba,
+		 enum ufs_trace_str_t str_t,
 		 unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
 		 u32 intr, u64 lba, u8 opcode, u8 group_id),
 
-	TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+	TP_ARGS(sdev, hba, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
 		opcode, group_id),
 
 	TP_STRUCT__entry(
 		__field(struct scsi_device *, sdev)
+		__field(struct ufs_hba *, hba)
 		__field(enum ufs_trace_str_t, str_t)
 		__field(unsigned int, tag)
 		__field(u32, doorbell)
@@ -290,6 +292,7 @@ TRACE_EVENT(ufshcd_command,
 
 	TP_fast_assign(
 		__entry->sdev = sdev;
+		__entry->hba = hba;
 		__entry->str_t = str_t;
 		__entry->tag = tag;
 		__entry->doorbell = doorbell;
@@ -312,13 +315,13 @@ TRACE_EVENT(ufshcd_command,
 );
 
 TRACE_EVENT(ufshcd_uic_command,
-	TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+	TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, u32 cmd,
 		 u32 arg1, u32 arg2, u32 arg3),
 
-	TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+	TP_ARGS(hba, str_t, cmd, arg1, arg2, arg3),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__field(enum ufs_trace_str_t, str_t)
 		__field(u32, cmd)
 		__field(u32, arg1)
@@ -327,7 +330,7 @@ TRACE_EVENT(ufshcd_uic_command,
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__entry->str_t = str_t;
 		__entry->cmd = cmd;
 		__entry->arg1 = arg1;
@@ -337,19 +340,19 @@ TRACE_EVENT(ufshcd_uic_command,
 
 	TP_printk(
 		"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
-		show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+		show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
 		__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
 	)
 );
 
 TRACE_EVENT(ufshcd_upiu,
-	TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+	TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, void *hdr,
 		 void *tsf, enum ufs_trace_tsf_t tsf_t),
 
-	TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+	TP_ARGS(hba, str_t, hdr, tsf, tsf_t),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__field(enum ufs_trace_str_t, str_t)
 		__array(unsigned char, hdr, 12)
 		__array(unsigned char, tsf, 16)
@@ -357,7 +360,7 @@ TRACE_EVENT(ufshcd_upiu,
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__entry->str_t = str_t;
 		memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
 		memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
@@ -366,7 +369,7 @@ TRACE_EVENT(ufshcd_upiu,
 
 	TP_printk(
 		"%s: %s: HDR:%s, %s:%s",
-		show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+		show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
 		__print_hex(__entry->hdr, sizeof(__entry->hdr)),
 		show_ufs_cmd_trace_tsf(__entry->tsf_t),
 		__print_hex(__entry->tsf, sizeof(__entry->tsf))
@@ -375,22 +378,22 @@ TRACE_EVENT(ufshcd_upiu,
 
 TRACE_EVENT(ufshcd_exception_event,
 
-	TP_PROTO(const char *dev_name, u16 status),
+	TP_PROTO(struct ufs_hba *hba, u16 status),
 
-	TP_ARGS(dev_name, status),
+	TP_ARGS(hba, status),
 
 	TP_STRUCT__entry(
-		__string(dev_name, dev_name)
+		__field(struct ufs_hba *, hba)
 		__field(u16, status)
 	),
 
 	TP_fast_assign(
-		__assign_str(dev_name);
+		__entry->hba = hba;
 		__entry->status = status;
 	),
 
 	TP_printk("%s: status 0x%x",
-		__get_str(dev_name), __entry->status
+		dev_name(__entry->hba->dev), __entry->status
 	)
 );
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 694ff7578fc1..9e63a9d3cb7e 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -72,11 +72,11 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
 
 	if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
 		/* In XTS mode, the blk_crypto_key's size is already doubled */
-		memcpy(cfg.crypto_key, key->raw, key->size/2);
+		memcpy(cfg.crypto_key, key->bytes, key->size/2);
 		memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
-		       key->raw + key->size/2, key->size/2);
+		       key->bytes + key->size/2, key->size/2);
 	} else {
-		memcpy(cfg.crypto_key, key->raw, key->size);
+		memcpy(cfg.crypto_key, key->bytes, key->size);
 	}
 
 	ufshcd_program_key(hba, &cfg, slot);
@@ -185,6 +185,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
 	hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
 	/* UFS only supports 8 bytes for any DUN */
 	hba->crypto_profile.max_dun_bytes_supported = 8;
+	hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
 	hba->crypto_profile.dev = hba->dev;
 
 	/*
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 786f20ef2238..d0a2c963a27d 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 			     enum query_opcode desc_op);
 
 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
 
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -117,11 +118,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
 	return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
-		bool up, enum ufs_notify_change_status status)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
+					       unsigned long target_freq,
+					       enum ufs_notify_change_status status)
 {
 	if (hba->vops && hba->vops->clk_scale_notify)
-		return hba->vops->clk_scale_notify(hba, up, status);
+		return hba->vops->clk_scale_notify(hba, up, target_freq, status);
 	return 0;
 }
 
@@ -159,9 +161,9 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
 }
 
 static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
-				  enum ufs_notify_change_status status,
-				  struct ufs_pa_layer_attr *dev_max_params,
-				  struct ufs_pa_layer_attr *dev_req_params)
+				enum ufs_notify_change_status status,
+				const struct ufs_pa_layer_attr *dev_max_params,
+				struct ufs_pa_layer_attr *dev_req_params)
 {
 	if (hba->vops && hba->vops->pwr_change_notify)
 		return hba->vops->pwr_change_notify(hba, status,
@@ -270,6 +272,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
 	return -EOPNOTSUPP;
 }
 
+static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+	if (hba->vops && hba->vops->freq_to_gear_speed)
+		return hba->vops->freq_to_gear_speed(hba, freq);
+
+	return 0;
+}
+
 extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..5cb6132b8147 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
 		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+		   UFS_DEVICE_QUIRK_PA_HIBER8TIME |
 		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = UFS_ANY_MODEL,
@@ -369,7 +370,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 	else
 		header = &hba->lrb[tag].ucd_rsp_ptr->header;
 
-	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+	trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
 			  UFS_TSF_CDB);
 }
 
@@ -380,7 +381,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
 	if (!trace_ufshcd_upiu_enabled())
 		return;
 
-	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+	trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
 			  &rq_rsp->qr, UFS_TSF_OSF);
 }
 
@@ -393,12 +394,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 		return;
 
 	if (str_t == UFS_TM_SEND)
-		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+		trace_ufshcd_upiu(hba, str_t,
 				  &descp->upiu_req.req_header,
 				  &descp->upiu_req.input_param1,
 				  UFS_TSF_TM_INPUT);
 	else
-		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+		trace_ufshcd_upiu(hba, str_t,
 				  &descp->upiu_rsp.rsp_header,
 				  &descp->upiu_rsp.output_param1,
 				  UFS_TSF_TM_OUTPUT);
@@ -418,7 +419,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
 	else
 		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
 
-	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+	trace_ufshcd_uic_command(hba, str_t, cmd,
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
 				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
@@ -473,7 +474,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
 	} else {
 		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	}
-	trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+	trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
 			     transfer_len, intr, lba, opcode, group_id);
 }
 
@@ -1063,7 +1064,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 					clki->max_freq, ret);
 				break;
 			}
-			trace_ufshcd_clk_scaling(dev_name(hba->dev),
+			trace_ufshcd_clk_scaling(hba,
 					"scaled up", clki->name,
 					clki->curr_freq,
 					clki->max_freq);
@@ -1081,7 +1082,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
 					clki->min_freq, ret);
 				break;
 			}
-			trace_ufshcd_clk_scaling(dev_name(hba->dev),
+			trace_ufshcd_clk_scaling(hba,
 					"scaled down", clki->name,
 					clki->curr_freq,
 					clki->min_freq);
@@ -1122,7 +1123,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
 			return ret;
 		}
 
-		trace_ufshcd_clk_scaling(dev_name(dev),
+		trace_ufshcd_clk_scaling(hba,
 			(scaling_down ? "scaled down" : "scaled up"),
 			clki->name, hba->clk_scaling.target_freq, freq);
 	}
@@ -1162,7 +1163,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
 	if (ret)
 		goto out;
 
@@ -1173,7 +1174,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
 	if (ret)
 		goto out;
 
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
 	if (ret) {
 		if (hba->use_pm_opp)
 			ufshcd_opp_set_rate(hba,
@@ -1186,7 +1187,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
 		ufshcd_pm_qos_update(hba, scale_up);
 
 out:
-	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+	trace_ufshcd_profile_clk_scaling(hba,
 			(scale_up ? "up" : "down"),
 			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 	return ret;
@@ -1313,16 +1314,26 @@ out:
 /**
  * ufshcd_scale_gear - scale up/down UFS gear
  * @hba: per adapter instance
+ * @target_gear: target gear to scale to
  * @scale_up: True for scaling up gear and false for scaling down
  *
  * Return: 0 for success; -EBUSY if scaling can't happen at this time;
  * non-zero for any other errors.
  */
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
 {
 	int ret = 0;
 	struct ufs_pa_layer_attr new_pwr_info;
 
+	if (target_gear) {
+		new_pwr_info = hba->pwr_info;
+		new_pwr_info.gear_tx = target_gear;
+		new_pwr_info.gear_rx = target_gear;
+
+		goto config_pwr_mode;
+	}
+
+	/* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
 	if (scale_up) {
 		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
 		       sizeof(struct ufs_pa_layer_attr));
@@ -1343,6 +1354,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 		}
 	}
 
+config_pwr_mode:
 	/* check if the power mode needs to be changed or not? */
 	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
 	if (ret)
@@ -1387,13 +1399,13 @@ out:
 	return ret;
 }
 
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
 {
 	up_write(&hba->clk_scaling_lock);
 
-	/* Enable Write Booster if we have scaled up else disable it */
+	/* Enable Write Booster if current gear requires it else disable it */
 	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
-		ufshcd_wb_toggle(hba, scale_up);
+		ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
 
 	mutex_unlock(&hba->wb_mutex);
 
@@ -1413,15 +1425,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
 static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
 				bool scale_up)
 {
+	u32 old_gear = hba->pwr_info.gear_rx;
+	u32 new_gear = 0;
 	int ret = 0;
 
+	new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
 	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
 	if (ret)
 		return ret;
 
 	/* scale down the gear before scaling down clocks */
 	if (!scale_up) {
-		ret = ufshcd_scale_gear(hba, false);
+		ret = ufshcd_scale_gear(hba, new_gear, false);
 		if (ret)
 			goto out_unprepare;
 	}
@@ -1429,13 +1445,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
 	ret = ufshcd_scale_clks(hba, freq, scale_up);
 	if (ret) {
 		if (!scale_up)
-			ufshcd_scale_gear(hba, true);
+			ufshcd_scale_gear(hba, old_gear, true);
 		goto out_unprepare;
 	}
 
 	/* scale up the gear after scaling up clocks */
 	if (scale_up) {
-		ret = ufshcd_scale_gear(hba, true);
+		ret = ufshcd_scale_gear(hba, new_gear, true);
 		if (ret) {
 			ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
 					  false);
@@ -1444,7 +1460,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
 	}
 
 out_unprepare:
-	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+	ufshcd_clock_scaling_unprepare(hba, ret);
 	return ret;
 }
 
@@ -1548,7 +1564,7 @@ static int ufshcd_devfreq_target(struct device *dev,
 	if (!ret)
 		hba->clk_scaling.target_freq = *freq;
 
-	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+	trace_ufshcd_profile_clk_scaling(hba,
 		(scale_up ? "up" : "down"),
 		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
@@ -1720,6 +1736,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_clk_info *clki;
+	unsigned long freq;
 	u32 value;
 	int err = 0;
 
@@ -1743,14 +1761,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
 
 	if (value) {
 		ufshcd_resume_clkscaling(hba);
-	} else {
-		ufshcd_suspend_clkscaling(hba);
-		err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
-		if (err)
-			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
-					__func__, err);
+		goto out_rel;
 	}
 
+	clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+	freq = clki->max_freq;
+
+	ufshcd_suspend_clkscaling(hba);
+
+	if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+		goto out_rel;
+
+	err = ufshcd_devfreq_scale(hba, freq, true);
+	if (err)
+		dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+			__func__, err);
+	else
+		hba->clk_scaling.target_freq = freq;
+
+out_rel:
 	ufshcd_release(hba);
 	ufshcd_rpm_put_sync(hba);
 out:
@@ -1783,6 +1812,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
 	if (!hba->clk_scaling.min_gear)
 		hba->clk_scaling.min_gear = UFS_HS_G1;
 
+	if (!hba->clk_scaling.wb_gear)
+		/* Use intermediate gear speed HS_G3 as the default wb_gear */
+		hba->clk_scaling.wb_gear = UFS_HS_G3;
+
 	INIT_WORK(&hba->clk_scaling.suspend_work,
 		  ufshcd_clk_scaling_suspend_work);
 	INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1881,7 +1914,7 @@ start:
 	case REQ_CLKS_OFF:
 		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 			hba->clk_gating.state = CLKS_ON;
-			trace_ufshcd_clk_gating(dev_name(hba->dev),
+			trace_ufshcd_clk_gating(hba,
 						hba->clk_gating.state);
 			break;
 		}
@@ -1893,7 +1926,7 @@ start:
 		fallthrough;
 	case CLKS_OFF:
 		hba->clk_gating.state = REQ_CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_clk_gating(hba,
 					hba->clk_gating.state);
 		queue_work(hba->clk_gating.clk_gating_workq,
 			   &hba->clk_gating.ungate_work);
@@ -1933,7 +1966,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 		if (hba->clk_gating.is_suspended ||
 		    hba->clk_gating.state != REQ_CLKS_OFF) {
 			hba->clk_gating.state = CLKS_ON;
-			trace_ufshcd_clk_gating(dev_name(hba->dev),
+			trace_ufshcd_clk_gating(hba,
 						hba->clk_gating.state);
 			return;
 		}
@@ -1955,7 +1988,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 		hba->clk_gating.state = CLKS_ON;
 		dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
 			__func__, ret);
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_clk_gating(hba,
 					hba->clk_gating.state);
 		return;
 	}
@@ -1980,7 +2013,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_clk_gating(hba,
 					hba->clk_gating.state);
 	}
 }
@@ -2006,7 +2039,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 	}
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
-	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
 	queue_delayed_work(hba->clk_gating.clk_gating_workq,
 			   &hba->clk_gating.gate_work,
 			   msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -3144,16 +3177,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 	int err;
 
 retry:
-	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+	time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
 						time_left);
 
 	if (likely(time_left)) {
-		/*
-		 * The completion handler called complete() and the caller of
-		 * this function still owns the @lrbp tag so the code below does
-		 * not trigger any race conditions.
-		 */
-		hba->dev_cmd.complete = NULL;
 		err = ufshcd_get_tr_ocs(lrbp, NULL);
 		if (!err)
 			err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3167,7 +3194,6 @@ retry:
 			/* successfully cleared the command, retry if needed */
 			if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
 				err = -EAGAIN;
-			hba->dev_cmd.complete = NULL;
 			return err;
 		}
 
@@ -3183,11 +3209,9 @@ retry:
 		spin_lock_irqsave(&hba->outstanding_lock, flags);
 		pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs);
-		if (pending) {
-			hba->dev_cmd.complete = NULL;
+		if (pending)
 			__clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
-		}
 		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 		if (!pending) {
@@ -3205,8 +3229,6 @@ retry:
 			spin_lock_irqsave(&hba->outstanding_lock, flags);
 			pending = test_bit(lrbp->task_tag, &hba->outstanding_reqs);
-			if (pending)
-				hba->dev_cmd.complete = NULL;
 			spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 			if (!pending) {
@@ -3240,13 +3262,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 				const u32 tag, int timeout)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	int err;
 
-	hba->dev_cmd.complete = &wait;
-
 	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
 	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
@@ -4005,7 +4023,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  *
  * Return: 0 on success, non-zero value on failure.
  */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {
 		.command = UIC_CMD_DME_RESET,
@@ -4019,6 +4037,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
 
 int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
 			       int agreed_gear,
@@ -4044,7 +4063,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
  *
  * Return: 0 on success, non-zero value on failure.
 */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {
 		.command = UIC_CMD_DME_ENABLE,
@@ -4058,6 +4077,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
 
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
 {
@@ -4422,7 +4442,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 
 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+	trace_ufshcd_profile_hibern8(hba, "enter",
 				     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 	if (ret)
@@ -4447,7 +4467,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 
 	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+	trace_ufshcd_profile_hibern8(hba, "exit",
 				     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 	if (ret) {
@@ -5551,12 +5571,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 		ufshcd_release_scsi_cmd(hba, lrbp);
 		/* Do not touch lrbp after scsi done */
 		scsi_done(cmd);
-	} else if (hba->dev_cmd.complete) {
+	} else {
 		if (cqe) {
 			ocs = le32_to_cpu(cqe->status) & MASK_OCS;
 			lrbp->utr_descriptor_ptr->header.ocs = ocs;
 		}
-		complete(hba->dev_cmd.complete);
+		complete(&hba->dev_cmd.complete);
 	}
 }
 
@@ -5658,6 +5678,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
 			continue;
 
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+		if (!hwq)
+			continue;
 
 		if (force_compl) {
 			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -5808,7 +5830,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
 	}
 
 	hba->auto_bkops_enabled = true;
-	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+	trace_ufshcd_auto_bkops_state(hba, "Enabled");
 
 	/* No need of URGENT_BKOPS exception from the device */
 	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5859,7 +5881,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
 	}
 
 	hba->auto_bkops_enabled = false;
-	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+	trace_ufshcd_auto_bkops_state(hba, "Disabled");
 	hba->is_urgent_bkops_lvl_checked = false;
 out:
 	return err;
@@ -5979,6 +6001,42 @@ out:
 				__func__, err);
 }
 
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+	struct utp_upiu_query_v4_0 *upiu_resp;
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	ufshcd_hold(hba);
+	mutex_lock(&hba->dev_cmd.lock);
+
+	ufshcd_init_query(hba, &request, &response,
+			  UPIU_QUERY_OPCODE_READ_ATTR,
+			  QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+	request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+	*exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
+
+	return err;
+}
+
 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
 {
 	u8 index;
@@ -6049,7 +6107,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
 	return ret;
 }
 
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
 						u32 avail_buf)
 {
 	u32 cur_buf;
@@ -6131,15 +6189,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	}
 
 	/*
-	 * The ufs device needs the vcc to be ON to flush.
 	 * With user-space reduction enabled, it's enough to enable flush
 	 * by checking only the available buffer. The threshold
 	 * defined here is > 90% full.
 	 * With user-space preserved enabled, the current-buffer
 	 * should be checked too because the wb buffer size can reduce
 	 * when disk tends to be full. This info is provided by current
-	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
-	 * keeping vcc on when current buffer is empty.
+	 * buffer (dCurrentWriteBoosterBufferSize).
 	 */
 	index = ufshcd_wb_get_query_index(hba);
 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6154,7 +6210,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	if (!hba->dev_info.b_presrv_uspc_en)
 		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
 
-	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+	return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
 }
 
 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6193,7 +6249,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		return;
 	}
 
-	trace_ufshcd_exception_event(dev_name(hba->dev), status);
+	trace_ufshcd_exception_event(hba, status);
 
 	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
 		ufshcd_bkops_exception_event_handler(hba);
@@ -6201,6 +6257,16 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
 		ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
 
+	if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+		hba->critical_health_count++;
+		sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+	}
+
+	if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+		atomic_inc(&hba->dev_lvl_exception_count);
+		sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+	}
+
 	ufs_debugfs_exception_event(hba, status);
 }
 
@@ -7652,7 +7718,7 @@ static void ufshcd_process_probe_result(struct ufs_hba *hba,
 		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	trace_ufshcd_init(dev_name(hba->dev), ret,
+	trace_ufshcd_init(hba, ret,
 			  ktime_to_us(ktime_sub(ktime_get(), probe_start)),
 			  hba->curr_dev_pwr_mode, hba->uic_link_state);
 }
@@ -8100,6 +8166,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
 	}
 }
 
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+	u32 ext_ufs_feature;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return;
+
+	ext_ufs_feature = get_unaligned_be32(desc_buf +
+				DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+	if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+		return;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+	ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
 static void ufshcd_set_rtt(struct ufs_hba *hba)
 {
 	struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8293,8 +8375,15 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 
 	ufshcd_temp_notif_probe(hba, desc_buf);
 
+	if (dev_info->wspecversion >= 0x410) {
+		hba->critical_health_count = 0;
+		ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+	}
+
 	ufs_init_rtc(hba, desc_buf);
 
+	ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
 	/*
 	 * ufshcd_read_string_desc returns size of the string
 	 * reset the error value
@@ -8384,6 +8473,31 @@ out:
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+	u32 pa_h8time;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+		return;
+	}
+
+	/* Increment by 1 to increase hibernation time by 100 µs */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+	if (ret)
+		dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	ufshcd_vops_apply_dev_quirks(hba);
@@ -8394,6 +8508,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+		ufshcd_quirk_override_pa_h8time(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -9148,12 +9265,12 @@ out:
 	} else if (!ret && on && hba->clk_gating.is_initialized) {
 		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
 			hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_clk_gating(hba,
 					hba->clk_gating.state);
 	}
 
 	if (clk_state_changed)
-		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_profile_clk_gating(hba,
 			(on ? "on" : "off"),
 			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 	return ret;
@@ -9853,7 +9970,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
 	if (ret)
 		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
 
-	trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+	trace_ufshcd_wl_runtime_suspend(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 
@@ -9873,7 +9990,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
 	if (ret)
 		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
 
-	trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+	trace_ufshcd_wl_runtime_resume(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 
@@ -9905,7 +10022,7 @@ static int ufshcd_wl_suspend(struct device *dev)
 out:
 	if (!ret)
 		hba->is_sys_suspended = true;
-	trace_ufshcd_wl_suspend(dev_name(dev), ret,
+	trace_ufshcd_wl_suspend(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 
@@ -9928,7 +10045,7 @@ static int ufshcd_wl_resume(struct device *dev)
 	if (ret)
 		dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
 out:
-	trace_ufshcd_wl_resume(dev_name(dev), ret,
+	trace_ufshcd_wl_resume(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	if (!ret)
@@ -9966,7 +10083,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
 	}
 	if (ufshcd_is_clkgating_allowed(hba)) {
 		hba->clk_gating.state = CLKS_OFF;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
+		trace_ufshcd_clk_gating(hba,
 					hba->clk_gating.state);
 	}
 
@@ -10039,7 +10156,7 @@ int ufshcd_system_suspend(struct device *dev)
 
 	ret = ufshcd_suspend(hba);
 out:
-	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+	trace_ufshcd_system_suspend(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	return ret;
@@ -10067,7 +10184,7 @@ int ufshcd_system_resume(struct device *dev)
 	ret = ufshcd_resume(hba);
 
 out:
-	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+	trace_ufshcd_system_resume(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 
@@ -10093,7 +10210,7 @@ int ufshcd_runtime_suspend(struct device *dev)
 
 	ret = ufshcd_suspend(hba);
 
-	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+	trace_ufshcd_runtime_suspend(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	return ret;
@@ -10120,7 +10237,7 @@ int ufshcd_runtime_resume(struct device *dev)
 
 	ret = ufshcd_resume(hba);
 
-	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+	trace_ufshcd_runtime_resume(hba, ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
 		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	return ret;
@@ -10446,6 +10563,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 						UFS_SLEEP_PWR_MODE,
 						UIC_LINK_HIBERN8_STATE);
 
+	init_completion(&hba->dev_cmd.complete);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
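
The exception-event handler above signals the new attributes with sysfs_notify(), so userspace can block in poll(2) instead of re-reading the file. A minimal userspace sketch of that standard pattern follows; the device path is an assumption (it depends on the platform device name of the UFS host controller), not something the patch defines.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed path; the real one depends on the UFS host device name. */
		int fd = open("/sys/devices/platform/ufshc/critical_health", O_RDONLY);
		char buf[16];
		ssize_t n;

		if (fd < 0)
			return 1;

		for (;;) {
			struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

			pread(fd, buf, sizeof(buf) - 1, 0);	/* read once to arm the notification */
			poll(&pfd, 1, -1);			/* wakes up on sysfs_notify() */
			n = pread(fd, buf, sizeof(buf) - 1, 0);	/* re-read the updated count */
			if (n > 0) {
				buf[n] = '\0';
				printf("critical_health: %s", buf);
			}
		}
	}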
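The ufshcd.c hunks around ufshcd_issue_dev_cmd() drop the per-command DECLARE_COMPLETION_ONSTACK() and the hba->dev_cmd.complete pointer juggling in favor of a completion embedded in hba->dev_cmd and initialized once in ufshcd_init(). A minimal sketch of that kernel completion pattern, with illustrative names (my_dev, my_dev_issue, my_dev_irq) that are not from the driver:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	struct my_dev {
		struct completion cmd_done;	/* long-lived, initialized once */
	};

	static void my_dev_probe(struct my_dev *dev)
	{
		/* mirrors init_completion(&hba->dev_cmd.complete) in ufshcd_init() */
		init_completion(&dev->cmd_done);
	}

	static int my_dev_issue(struct my_dev *dev, unsigned long timeout_ms)
	{
		/* ... issue the command to the hardware here ... */
		if (!wait_for_completion_timeout(&dev->cmd_done,
						 msecs_to_jiffies(timeout_ms)))
			return -ETIMEDOUT;
		return 0;
	}

	/* called from the completion path, as ufshcd_compl_one_cqe() now does */
	static void my_dev_irq(struct my_dev *dev)
	{
		complete(&dev->cmd_done);
	}

Because device commands are serialized under hba->dev_cmd.lock, a single embedded completion is safe and removes the need to clear a dangling on-stack pointer on every exit path.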
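The new ->freq_to_gear_speed() vop lets ufshcd_devfreq_scale() derive the target gear directly from the requested clock frequency instead of toggling between the saved min/max power modes. An illustrative host-driver implementation might look like the sketch below; the frequency thresholds are invented for the example, and a real driver (e.g. ufs-qcom) would derive them from its clock plan:

	/* Hypothetical vendor implementation; thresholds are assumptions. */
	static u32 example_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
	{
		if (freq >= 300000000)
			return UFS_HS_G5;
		if (freq >= 200000000)
			return UFS_HS_G4;
		if (freq >= 100000000)
			return UFS_HS_G3;
		return UFS_HS_G2;	/* conservative floor for low core clocks */
	}

Returning 0 (as the wrapper does when the vop is absent) makes ufshcd_scale_gear() fall back to the legacy saved-power-mode path, so existing host drivers keep their old behavior.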