Diffstat (limited to 'drivers/ufs/core/ufshcd.c')
 drivers/ufs/core/ufshcd.c | 145 ++++++++++++++++++++++-----------------------
 1 file changed, 71 insertions(+), 74 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 5e3c67e96956..abbe7135a977 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -36,10 +36,10 @@
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
+#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
@@ -51,8 +51,10 @@
/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
+enum {
+ UIC_CMD_TIMEOUT_DEFAULT = 500,
+ UIC_CMD_TIMEOUT_MAX = 2000,
+};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
@@ -116,6 +118,23 @@ static bool is_mcq_supported(struct ufs_hba *hba)
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;
+
+static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
+ UIC_CMD_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops uic_cmd_timeout_ops = {
+ .set = uic_cmd_timeout_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
+MODULE_PARM_DESC(uic_cmd_timeout,
+ "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
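The new knob is validated with param_set_uint_minmax(), so writes outside the 500-2000 ms window fail with -EINVAL at parse time. Once the module is loaded, the value should be adjustable at runtime through sysfs; a minimal userspace sketch, assuming the standard parameter path for the ufshcd-core module:

#include <stdio.h>

/* Raise the UIC command timeout to its 2000 ms maximum. The sysfs
 * path is an assumption derived from the module name, not something
 * this patch introduces. */
int main(void)
{
	FILE *f = fopen("/sys/module/ufshcd_core/parameters/uic_cmd_timeout", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "2000\n");
	return fclose(f) ? 1 : 0;
}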
@@ -1785,8 +1804,6 @@ static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
if (!ufshcd_is_clkscaling_supported(hba))
return;
@@ -1798,9 +1815,8 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_scaling.workq = alloc_ordered_workqueue(
+ "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
hba->clk_scaling.is_initialized = true;
}
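Same conversion as the clock-gating and error-handler queues below: alloc_ordered_workqueue() accepts a printf-style format, which removes the fixed-size name buffer (sizeof("ufs_clkscaling_00") only leaves room for two digits, so host numbers above 99 were silently truncated), and WQ_MEM_RECLAIM guarantees a rescuer thread so the queue can make progress under memory pressure. A minimal sketch of the pattern, with an illustrative queue name:

#include <linux/workqueue.h>

/* Ordered queue (one work item in flight) whose name is formatted
 * internally, so no caller-side buffer can truncate it. */
static struct workqueue_struct *demo_alloc_wq(unsigned int host_no)
{
	return alloc_ordered_workqueue("ufs_demo_%u", WQ_MEM_RECLAIM,
				       host_no);
}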
@@ -2124,8 +2140,6 @@ static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -2135,10 +2149,9 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
+ "ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
+ hba->host->host_no);
ufshcd_init_clk_gating_sysfs(hba);
@@ -2426,7 +2439,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
* 0h: legacy single doorbell support is available
* 1h: indicate that legacy single doorbell support has been removed
*/
- hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP))
+ hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
+ else
+ hba->lsdb_sup = true;
+
if (!hba->mcq_sup)
return 0;
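Note the inverted sense of the field: a set LSDBS bit means legacy single doorbell support has been removed, and the new quirk overrides controllers that set the bit spuriously. A self-contained sketch of the decision, with an illustrative mask standing in for MASK_LSDB_SUPPORT:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_LSDB_SUPPORT	BIT(25)	/* illustrative bit position */

/* 0h in the field means the legacy doorbell is present, hence the '!'. */
static bool demo_lsdb_supported(u32 capabilities, bool broken_lsdbs_quirk)
{
	if (broken_lsdbs_quirk)
		return true;	/* capability register is known to be wrong */

	return !FIELD_GET(DEMO_LSDB_SUPPORT, capabilities);
}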
@@ -2448,7 +2465,7 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
- 500, UIC_CMD_TIMEOUT * 1000, false, hba,
+ 500, uic_cmd_timeout * 1000, false, hba,
REG_CONTROLLER_STATUS);
return ret == 0;
}
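For reference, the iopoll helper reads as read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...), so the controller status register is polled every 500 us until UIC_COMMAND_READY is seen or uic_cmd_timeout milliseconds elapse. A standalone sketch of the same shape, with illustrative register and bit names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Poll an MMIO status register every 500 us until a ready bit
 * appears; returns 0 on success, -ETIMEDOUT otherwise. */
static int demo_wait_ready(void __iomem *status_reg, unsigned int timeout_ms)
{
	u32 val;

	return read_poll_timeout(readl, val, val & BIT(3),
				 500, timeout_ms * 1000UL, false,
				 status_reg);
}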
@@ -2508,7 +2525,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
@@ -2916,9 +2933,8 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
i * ufshcd_get_ucd_size(hba);
- u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
- response_upiu);
- u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
+ u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
+ u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
@@ -4100,11 +4116,16 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
- return; /* no more delay required */
+ min_sleep_time_us = 0; /* no more delay required */
+ }
+
+ if (min_sleep_time_us > 0) {
+ /* allow sleep for extra 50us if needed */
+ usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
- /* allow sleep for extra 50us if needed */
- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
+ /* update the last_dme_cmd_tstamp */
+ hba->last_dme_cmd_tstamp = ktime_get();
}
/**
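The restructuring above removes the early return so hba->last_dme_cmd_tstamp is refreshed on every call rather than only when a sleep was needed, and the sleep itself becomes conditional on a non-zero remainder. The underlying pattern, as a self-contained sketch:

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/types.h>

/* Enforce a minimum spacing between two operations and always record
 * when the latest one happened. */
static void demo_enforce_min_delay(ktime_t *last, u32 min_delay_us)
{
	s64 elapsed_us = ktime_us_delta(ktime_get(), *last);

	if (elapsed_us >= 0 && elapsed_us < min_delay_us) {
		u32 sleep_us = min_delay_us - elapsed_us;

		/* allow sleep for an extra 50 us, as above */
		usleep_range(sleep_us, sleep_us + 50);
	}

	*last = ktime_get();	/* unconditional, unlike the old early return */
}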
@@ -4276,7 +4297,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+ msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
@@ -5395,10 +5416,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
}
break;
case OCS_ABORTED:
- result |= DID_ABORT << 16;
- break;
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
+ dev_warn(hba->dev,
+ "OCS %s from controller for tag %d\n",
+ (ocs == OCS_ABORTED ? "aborted" : "invalid"),
+ lrbp->task_tag);
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
@@ -5867,12 +5890,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
- * @status: bkops_status value
*
* Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
- * bkops_status is greater than or equal to "status" argument passed to
- * this function, disable otherwise.
+ * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl",
+ * disable otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
@@ -5880,11 +5902,11 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
- int err;
+ enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
+ int err;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
@@ -5906,23 +5928,6 @@ out:
return err;
}
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * If BKOPs is enabled, this function returns 0, 1 if the bkops in not enabled
- * and negative error value for any other failure.
- *
- * Return: 0 upon success; < 0 upon failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
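With ufshcd_urgent_bkops() folded away, both the suspend and resume paths below call ufshcd_bkops_ctrl() directly and the threshold always comes from hba->urgent_bkops_lvl. The decision reduces to the following sketch, assuming this file's static ufshcd_enable_auto_bkops()/ufshcd_disable_auto_bkops() helpers and simplified error handling:

/* Enable auto-BKOPS when the device-reported urgency reaches the
 * adapter threshold, disable it otherwise. */
static int demo_bkops_ctrl(struct ufs_hba *hba, u32 curr_status)
{
	if (curr_status >= hba->urgent_bkops_lvl)
		return ufshcd_enable_auto_bkops(hba);

	return ufshcd_disable_auto_bkops(hba);
}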
@@ -6462,26 +6467,12 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct ufs_hw_queue *hwq;
- unsigned long flags;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
- /* Release cmd in MCQ mode if abort succeeds */
- if (hba->mcq_enabled && (*ret == 0)) {
- hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
- if (!hwq)
- return 0;
- spin_lock_irqsave(&hwq->cq_lock, flags);
- if (ufshcd_cmd_inflight(lrbp->cmd))
- ufshcd_release_scsi_cmd(hba, lrbp);
- spin_unlock_irqrestore(&hwq->cq_lock, flags);
- }
-
return *ret == 0;
}
@@ -8228,7 +8219,7 @@ static void ufshcd_update_rtc(struct ufs_hba *hba)
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
0, 0, &val);
- ufshcd_rpm_put_sync(hba);
+ ufshcd_rpm_put(hba);
if (err)
dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
@@ -8645,6 +8636,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
ufshcd_init_clk_scaling_sysfs(hba);
}
+ /*
+ * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
+ * pointer and hence must only be started after the WLUN pointer has
+ * been initialized by ufshcd_scsi_add_wlus().
+ */
+ schedule_delayed_work(&hba->ufs_rtc_update_work,
+ msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
+
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
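Scheduling the first RTC update here, after ufshcd_scsi_add_wlus() has run, guarantees the worker never dereferences a NULL hba->ufs_device_wlun. Purely to illustrate the invariant (this guard is hypothetical and not part of the patch), the alternative would have been a check in the worker itself:

/* Hypothetical guard, assuming the ufs_hba layout from ufshcd.h:
 * skip the update until the device WLUN exists. The patch instead
 * moves the schedule site so the race cannot occur at all. */
static bool demo_rtc_may_run(struct ufs_hba *hba)
{
	return READ_ONCE(hba->ufs_device_wlun) != NULL;
}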
@@ -8804,8 +8803,6 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
ufshcd_force_reset_auto_bkops(hba);
ufshcd_set_timestamp_attr(hba);
- schedule_delayed_work(&hba->ufs_rtc_update_work,
- msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
@@ -9683,7 +9680,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* allow background operations if bkops status shows
* that performance might be impacted.
*/
- ret = ufshcd_urgent_bkops(hba);
+ ret = ufshcd_bkops_ctrl(hba);
if (ret) {
/*
* If return err in suspend flow, IO will hang.
@@ -9872,7 +9869,7 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
- ufshcd_urgent_bkops(hba);
+ ufshcd_bkops_ctrl(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
@@ -10206,7 +10203,9 @@ static void ufshcd_wl_shutdown(struct device *dev)
shost_for_each_device(sdev, hba->host) {
if (sdev == hba->ufs_device_wlun)
continue;
- scsi_device_quiesce(sdev);
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
}
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
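Replacing scsi_device_quiesce() makes the intent explicit: SDEV_QUIESCE still admits PM-initiated requests, whereas SDEV_OFFLINE rejects all new I/O immediately, which is what a shutdown path wants. The state-change pattern as a self-contained sketch:

#include <linux/mutex.h>
#include <scsi/scsi_device.h>

/* Take a SCSI device offline so further I/O fails fast.
 * scsi_device_set_state() can reject an illegal transition; the
 * shutdown path above deliberately ignores its return value. */
static void demo_offline_device(struct scsi_device *sdev)
{
	mutex_lock(&sdev->state_mutex);
	scsi_device_set_state(sdev, SDEV_OFFLINE);
	mutex_unlock(&sdev->state_mutex);
}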
@@ -10386,7 +10385,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
@@ -10453,9 +10451,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ hba->eh_wq = alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM,
+ hba->host->host_no);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);