author    Linus Torvalds <torvalds@linux-foundation.org>    2021-11-12 12:25:50 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-11-12 12:25:50 -0800
commit    6cbcc7ab2147d721700029a78558dc0ea4207153 (patch)
tree      b22821f2a29055dadcd662fb164698633448dce6 /drivers/scsi/ufs
parent    030c28a021131c6944d35a4fa727781f9df3a05d (diff)
parent    3344b58b53a76199dae48faa396e9fc37bf86992 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull more SCSI updates from James Bottomley:
 "This series is all the stragglers that didn't quite make the first
  merge window pull. It's mostly minor updates and bug fixes of merge
  window code but it also has two driver updates: ufs and qla2xxx"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (46 commits)
  scsi: scsi_debug: Don't call kcalloc() if size arg is zero
  scsi: core: Remove command size deduction from scsi_setup_scsi_cmnd()
  scsi: scsi_ioctl: Validate command size
  scsi: ufs: ufshpb: Properly handle max-single-cmd
  scsi: core: Avoid leaving shost->last_reset with stale value if EH does not run
  scsi: bsg: Fix errno when scsi_bsg_register_queue() fails
  scsi: sr: Remove duplicate assignment
  scsi: ufs: ufs-exynos: Introduce ExynosAuto v9 virtual host
  scsi: ufs: ufs-exynos: Multi-host configuration for ExynosAuto v9
  scsi: ufs: ufs-exynos: Support ExynosAuto v9 UFS
  scsi: ufs: ufs-exynos: Add pre/post_hce_enable drv callbacks
  scsi: ufs: ufs-exynos: Factor out priv data init
  scsi: ufs: ufs-exynos: Add EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR option
  scsi: ufs: ufs-exynos: Support custom version of ufs_hba_variant_ops
  scsi: ufs: ufs-exynos: Add setup_clocks callback
  scsi: ufs: ufs-exynos: Add refclkout_stop control
  scsi: ufs: ufs-exynos: Simplify drv_data retrieval
  scsi: ufs: ufs-exynos: Change pclk available max value
  scsi: ufs: Add quirk to enable host controller without PH configuration
  scsi: ufs: Add quirk to handle broken UIC command
  ...
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c   98
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c   354
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.h    27
-rw-r--r--  drivers/scsi/ufs/ufshcd.c       108
-rw-r--r--  drivers/scsi/ufs/ufshcd.h        13
-rw-r--r--  drivers/scsi/ufs/ufshci.h        15
-rw-r--r--  drivers/scsi/ufs/ufshpb.c        24
-rw-r--r--  drivers/scsi/ufs/ufshpb.h         1
8 files changed, 557 insertions, 83 deletions
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 4e1ff209b933..4a0bbcf1757a 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -8,6 +8,18 @@
static struct dentry *ufs_debugfs_root;
+struct ufs_debugfs_attr {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */
+static inline struct ufs_hba *hba_from_file(const struct file *file)
+{
+ return d_inode(file->f_path.dentry->d_parent)->i_private;
+}
+
void __init ufs_debugfs_init(void)
{
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
@@ -20,7 +32,7 @@ void ufs_debugfs_exit(void)
static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
{
- struct ufs_hba *hba = s->private;
+ struct ufs_hba *hba = hba_from_file(s->file);
struct ufs_event_hist *e = hba->ufs_stats.event;
#define PRT(fmt, typ) \
@@ -126,13 +138,93 @@ static void ufs_debugfs_restart_ee(struct work_struct *work)
ufs_debugfs_put_user_access(hba);
}
+static int ufs_saved_err_show(struct seq_file *s, void *data)
+{
+ struct ufs_debugfs_attr *attr = s->private;
+ struct ufs_hba *hba = hba_from_file(s->file);
+ const int *p;
+
+ if (strcmp(attr->name, "saved_err") == 0) {
+ p = &hba->saved_err;
+ } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+ p = &hba->saved_uic_err;
+ } else {
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%d\n", *p);
+ return 0;
+}
+
+static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ufs_debugfs_attr *attr = file->f_inode->i_private;
+ struct ufs_hba *hba = hba_from_file(file);
+ char val_str[16] = { };
+ int val, ret;
+
+ if (count >= sizeof(val_str))
+ return -EINVAL;
+ if (copy_from_user(val_str, buf, count))
+ return -EFAULT;
+ ret = kstrtoint(val_str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irq(hba->host->host_lock);
+ if (strcmp(attr->name, "saved_err") == 0) {
+ hba->saved_err = val;
+ } else if (strcmp(attr->name, "saved_uic_err") == 0) {
+ hba->saved_uic_err = val;
+ } else {
+ ret = -ENOENT;
+ }
+ if (ret == 0)
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irq(hba->host->host_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static int ufs_saved_err_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_saved_err_show, inode->i_private);
+}
+
+static const struct file_operations ufs_saved_err_fops = {
+ .owner = THIS_MODULE,
+ .open = ufs_saved_err_open,
+ .read = seq_read,
+ .write = ufs_saved_err_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_attrs[] = {
+ { "stats", 0400, &ufs_debugfs_stats_fops },
+ { "saved_err", 0600, &ufs_saved_err_fops },
+ { "saved_uic_err", 0600, &ufs_saved_err_fops },
+ { }
+};
+
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
+ const struct ufs_debugfs_attr *attr;
+ struct dentry *root;
+
/* Set default exception event rate limit period to 20ms */
hba->debugfs_ee_rate_limit_ms = 20;
INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
- hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
- debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+
+ root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
+ if (IS_ERR_OR_NULL(root))
+ return;
+ hba->debugfs_root = root;
+ d_inode(root)->i_private = hba;
+ for (attr = ufs_attrs; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, root, (void *)attr,
+ attr->fops);
debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
hba, &ee_usr_mask_fops);
debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
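A note on the pattern just added: the attribute table is sentinel-terminated, each file's inode->i_private carries its struct ufs_debugfs_attr, and the owning hba lives in the parent directory's inode, which is what hba_from_file() dereferences. A minimal sketch of a show callback built on that scheme (foo_attr_show() is hypothetical, not part of the patch):

    static int foo_attr_show(struct seq_file *s, void *data)
    {
            /* single_open() stashed the attribute in s->private ... */
            const struct ufs_debugfs_attr *attr = s->private;
            /* ... and the directory inode's i_private yields the hba. */
            struct ufs_hba *hba = hba_from_file(s->file);

            seq_printf(s, "%s belongs to %s\n", attr->name, dev_name(hba->dev));
            return 0;
    }

Passing the attr rather than the hba as the file's private data is what lets saved_err and saved_uic_err share a single file_operations.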
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 30d0c1aba0c7..cd26bc82462e 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -48,10 +50,11 @@
#define HCI_ERR_EN_T_LAYER 0x84
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
+#define REFCLKOUT_STOP BIT(4)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
-#define CLK_STOP_MASK (REFCLK_STOP |\
+#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
UNIPRO_MCLK_STOP |\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
@@ -74,6 +77,52 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
+/* FSYS UFS Shareability */
+#define UFS_WR_SHARABLE BIT(2)
+#define UFS_RD_SHARABLE BIT(1)
+#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
+
+/* Multi-host registers */
+#define MHCTRL 0xC4
+#define MHCTRL_EN_VH_MASK (0xE)
+#define MHCTRL_EN_VH(vh) (vh << 1)
+#define PH2VH_MBOX 0xD8
+
+#define MH_MSG_MASK (0xFF)
+
+#define MH_MSG(id, msg) ((id << 8) | (msg & 0xFF))
+#define MH_MSG_PH_READY 0x1
+#define MH_MSG_VH_READY 0x2
+
+#define ALLOW_INQUIRY BIT(25)
+#define ALLOW_MODE_SELECT BIT(24)
+#define ALLOW_MODE_SENSE BIT(23)
+#define ALLOW_PRE_FETCH GENMASK(22, 21)
+#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */
+#define ALLOW_READ_BUFFER BIT(17)
+#define ALLOW_READ_CAPACITY GENMASK(16, 15)
+#define ALLOW_REPORT_LUNS BIT(14)
+#define ALLOW_REQUEST_SENSE BIT(13)
+#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
+#define ALLOW_TEST_UNIT_READY BIT(6)
+#define ALLOW_UNMAP BIT(5)
+#define ALLOW_VERIFY BIT(4)
+#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */
+
+#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
+ ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
+ ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
+ ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
+ ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
+ ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
+ ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
+
+#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
+#define HCI_MH_IID_IN_TASK_TAG 0x308
+
+#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
+
enum {
UNIPRO_L1_5 = 0,/* PHY Adapter */
UNIPRO_L2, /* Data Link */
@@ -149,6 +198,117 @@ static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
return 0;
}
+static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+{
+ struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+
+ /* IO Coherency setting */
+ if (ufs->sysreg) {
+ return regmap_update_bits(ufs->sysreg,
+ ufs->shareability_reg_offset,
+ UFS_SHARABLE, UFS_SHARABLE);
+ }
+
+ attr->tx_dif_p_nsec = 3200000;
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ /* Enable Virtual Host #1 */
+ ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
+ /* Default VH Transfer permissions */
+ hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
+ /* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
+ hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
+{
+ struct ufs_hba *hba = ufs->hba;
+ int i;
+ u32 tx_line_reset_period, rx_line_reset_period;
+
+ rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+ tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
+ for_each_ufs_rx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
+ (rx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
+ (rx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
+ (rx_line_reset_period) & 0xFF);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
+ }
+
+ for_each_ufs_tx_lane(ufs, i) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
+ DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
+ /* Keep VND_TX_LINERESET_PVALUE from affecting VND_TX_CLK_PRD */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
+ 0x02);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
+ (tx_line_reset_period >> 16) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
+ (tx_line_reset_period >> 8) & 0xFF);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
+ (tx_line_reset_period) & 0xFF);
+
+ /* TX PWM Gear Capability / PWM_G1_ONLY */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
+ }
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+
+ /* PACP_PWR_req is delivered to the remote DME */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
+
+ return 0;
+}
+
+static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
+ struct ufs_pa_layer_attr *pwr)
+{
+ struct ufs_hba *hba = ufs->hba;
+ u32 enabled_vh;
+
+ enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
+
+ /* Send physical host ready message to virtual hosts */
+ ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
+
+ return 0;
+}
+
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
@@ -793,6 +953,27 @@ static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
}
}
+static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
+ enum ufs_notify_change_status status)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ if (!ufs)
+ return 0;
+
+ if (on && status == PRE_CHANGE) {
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_disable_auto_ctrl_hcc(ufs);
+ exynos_ufs_ungate_clks(ufs);
+ } else if (!on && status == POST_CHANGE) {
+ exynos_ufs_gate_clks(ufs);
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
+ exynos_ufs_enable_auto_ctrl_hcc(ufs);
+ }
+
+ return 0;
+}
+
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -808,8 +989,12 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
/* m-phy */
exynos_ufs_phy_init(ufs);
- exynos_ufs_config_phy_time_attr(ufs);
- exynos_ufs_config_phy_cap_attr(ufs);
+ if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
+ exynos_ufs_config_phy_time_attr(ufs);
+ exynos_ufs_config_phy_cap_attr(ufs);
+ }
+
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
if (ufs->drv_data->pre_link)
ufs->drv_data->pre_link(ufs);
@@ -893,17 +1078,10 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
struct device_node *np = dev->of_node;
- struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs;
struct exynos_ufs_uic_attr *attr;
int ret = 0;
- while (drv_data->compatible) {
- if (of_device_is_compatible(np, drv_data->compatible)) {
- ufs->drv_data = drv_data;
- break;
- }
- drv_data++;
- }
+ ufs->drv_data = device_get_match_data(dev);
if (ufs->drv_data && ufs->drv_data->uic_attr) {
attr = ufs->drv_data->uic_attr;
@@ -913,6 +1091,17 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
goto out;
}
+ ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ if (IS_ERR(ufs->sysreg))
+ ufs->sysreg = NULL;
+ else {
+ if (of_property_read_u32_index(np, "samsung,sysreg", 1,
+ &ufs->shareability_reg_offset)) {
+ dev_warn(dev, "can't get an offset from sysreg, using default value\n");
+ ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ }
+ }
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -927,6 +1116,18 @@ out:
return ret;
}
+static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
+ struct exynos_ufs *ufs)
+{
+ ufs->hba = hba;
+ ufs->opts = ufs->drv_data->opts;
+ ufs->rx_sel_idx = PA_MAXDATALANES;
+ if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
+ ufs->rx_sel_idx = 0;
+ hba->priv = (void *)ufs;
+ hba->quirks = ufs->drv_data->quirks;
+}
+
static int exynos_ufs_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
@@ -976,13 +1177,8 @@ static int exynos_ufs_init(struct ufs_hba *hba)
if (ret)
goto phy_off;
- ufs->hba = hba;
- ufs->opts = ufs->drv_data->opts;
- ufs->rx_sel_idx = PA_MAXDATALANES;
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
- ufs->rx_sel_idx = 0;
- hba->priv = (void *)ufs;
- hba->quirks = ufs->drv_data->quirks;
+ exynos_ufs_priv_init(hba, ufs);
+
if (ufs->drv_data->drv_init) {
ret = ufs->drv_data->drv_init(dev, ufs);
if (ret) {
@@ -1110,6 +1306,12 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ if (ufs->drv_data->pre_hce_enable) {
+ ret = ufs->drv_data->pre_hce_enable(ufs);
+ if (ret)
+ return ret;
+ }
+
ret = exynos_ufs_host_reset(hba);
if (ret)
return ret;
@@ -1119,6 +1321,10 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
exynos_ufs_calc_pwm_clk_div(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
exynos_ufs_enable_auto_ctrl_hcc(ufs);
+
+ if (ufs->drv_data->post_hce_enable)
+ ret = ufs->drv_data->post_hce_enable(ufs);
+
break;
}
@@ -1202,12 +1408,77 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return 0;
}
+static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status == POST_CHANGE) {
+ ufshcd_set_link_active(hba);
+ ufshcd_set_ufs_dev_active(hba);
+ }
+
+ return 0;
+}
+
+static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
+{
+ u32 mbox;
+ ktime_t start, stop;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
+
+ do {
+ mbox = ufshcd_readl(hba, PH2VH_MBOX);
+ /* TODO: Mailbox message protocols between the PH and VHs are
+ * not implemented yet. This will be supported later
+ */
+ if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
+ return 0;
+
+ usleep_range(40, 50);
+ } while (ktime_before(ktime_get(), stop));
+
+ return -ETIME;
+}
+
+static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_ufs *ufs;
+ int ret;
+
+ ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+ if (!ufs)
+ return -ENOMEM;
+
+ /* exynos-specific hci */
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
+ if (IS_ERR(ufs->reg_hci)) {
+ dev_err(dev, "cannot ioremap for hci vendor register\n");
+ return PTR_ERR(ufs->reg_hci);
+ }
+
+ ret = exynosauto_ufs_vh_wait_ph_ready(hba);
+ if (ret)
+ return ret;
+
+ ufs->drv_data = device_get_match_data(dev);
+ if (!ufs->drv_data)
+ return -ENODEV;
+
+ exynos_ufs_priv_init(hba, ufs);
+
+ return 0;
+}
+
static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
+ .setup_clocks = exynos_ufs_setup_clocks,
.setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
.setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
.hibern8_notify = exynos_ufs_hibern8_notify,
@@ -1215,12 +1486,24 @@ static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.resume = exynos_ufs_resume,
};
+static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
+ .name = "exynosauto_ufs_vh",
+ .init = exynosauto_ufs_vh_init,
+ .link_startup_notify = exynosauto_ufs_vh_link_startup_notify,
+};
+
static int exynos_ufs_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
+ const struct exynos_ufs_drv_data *drv_data =
+ device_get_match_data(dev);
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
+ if (drv_data && drv_data->vops)
+ vops = drv_data->vops;
+
+ err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
@@ -1261,8 +1544,35 @@ static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.pa_dbg_option_suite = 0x30103,
};
+static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
+ .uic_attr = &exynos7_uic_attr,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .drv_init = exynosauto_ufs_drv_init,
+ .post_hce_enable = exynosauto_ufs_post_hce_enable,
+ .pre_link = exynosauto_ufs_pre_link,
+ .pre_pwr_change = exynosauto_ufs_pre_pwr_change,
+ .post_pwr_change = exynosauto_ufs_post_pwr_change,
+};
+
+static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
+ .vops = &ufs_hba_exynosauto_vh_ops,
+ .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
+ UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_BROKEN_HCE |
+ UFSHCD_QUIRK_BROKEN_UIC_CMD |
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
+ .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+};
+
static struct exynos_ufs_drv_data exynos_ufs_drvs = {
- .compatible = "samsung,exynos7-ufs",
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
@@ -1287,6 +1597,10 @@ static struct exynos_ufs_drv_data exynos_ufs_drvs = {
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs",
+ .data = &exynosauto_ufs_drvs },
+ { .compatible = "samsung,exynosautov9-ufs-vh",
+ .data = &exynosauto_ufs_vh_drvs },
{},
};
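As an aside, the open-coded mailbox poll in exynosauto_ufs_vh_wait_ph_ready() could also be phrased with the generic helper from <linux/iopoll.h>. A hedged sketch, not what the driver ships (and it returns -ETIMEDOUT rather than -ETIME):

    #include <linux/iopoll.h>

    static int foo_wait_ph_ready(struct ufs_hba *hba)
    {
            u32 mbox;

            /* readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) */
            return readl_poll_timeout(hba->mmio_base + PH2VH_MBOX, mbox,
                                      (mbox & MH_MSG_MASK) == MH_MSG_PH_READY,
                                      50, PH_READY_TIMEOUT_MS * USEC_PER_MSEC);
    }

The explicit ktime loop in the patch keeps the 40-50us usleep_range window and the mid-loop TODO visible, which is presumably why it stays open-coded.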
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
index dadf4fd10dd8..1c33e5466082 100644
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ b/drivers/scsi/ufs/ufs-exynos.h
@@ -56,6 +56,22 @@
#define TX_GRAN_NVAL_10_08 0x0296
#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE0 0xAD
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_PVALUE2 0xAB
+
+#define TX_LINE_RESET_TIME 3200
+
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_VALUE0 0x1D
+#define VND_RX_LINERESET_VALUE1 0x1C
+#define VND_RX_LINERESET_VALUE2 0x1B
+
+#define RX_LINE_RESET_TIME 1000
+
#define RX_FILLER_ENABLE 0x0316
#define RX_FILLER_EN (1 << 1)
#define RX_LINERESET_VAL 0x0317
@@ -99,7 +115,7 @@ struct exynos_ufs;
#define PA_HIBERN8TIME_VAL 0x20
#define PCLK_AVAIL_MIN 70000000
-#define PCLK_AVAIL_MAX 133000000
+#define PCLK_AVAIL_MAX 167000000
struct exynos_ufs_uic_attr {
/* TX Attributes */
@@ -142,7 +158,7 @@ struct exynos_ufs_uic_attr {
};
struct exynos_ufs_drv_data {
- char *compatible;
+ const struct ufs_hba_variant_ops *vops;
struct exynos_ufs_uic_attr *uic_attr;
unsigned int quirks;
unsigned int opts;
@@ -154,6 +170,8 @@ struct exynos_ufs_drv_data {
struct ufs_pa_layer_attr *pwr);
int (*post_pwr_change)(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr);
+ int (*pre_hce_enable)(struct exynos_ufs *ufs);
+ int (*post_hce_enable)(struct exynos_ufs *ufs);
};
struct ufs_phy_time_cfg {
@@ -191,7 +209,9 @@ struct exynos_ufs {
struct ufs_pa_layer_attr dev_req_params;
struct ufs_phy_time_cfg t_cfg;
ktime_t entry_hibern8_t;
- struct exynos_ufs_drv_data *drv_data;
+ const struct exynos_ufs_drv_data *drv_data;
+ struct regmap *sysreg;
+ u32 shareability_reg_offset;
u32 opts;
#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
@@ -199,6 +219,7 @@ struct exynos_ufs {
#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2)
#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3)
#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4)
+#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5)
};
#define for_each_ufs_rx_lane(ufs, i) \
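To see what the new VND_*_LINERESET attributes end up holding, a standalone arithmetic sketch (the 166 MHz mclk is an assumed example, not a measured value): exynosauto_ufs_pre_link() converts the reset time into mclk cycles and programs the 24-bit result one byte at a time. Note the multiply needs a 64-bit intermediate, since 1000 * 166000000 would overflow 32 bits.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC      1000000ULL
    #define RX_LINE_RESET_TIME 1000            /* from this header */

    int main(void)
    {
            uint64_t mclk_rate = 166000000;    /* assumed 166 MHz */
            unsigned period = RX_LINE_RESET_TIME * mclk_rate / NSEC_PER_MSEC;

            /* Split across VND_RX_LINERESET_VALUE2/1/0, high byte first. */
            printf("period=%u -> 0x%02x 0x%02x 0x%02x\n", period,
                   (period >> 16) & 0xFF, (period >> 8) & 0xFF, period & 0xFF);
            return 0;   /* prints: period=166000 -> 0x02 0x88 0x70 */
    }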
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 5c6a58a666d2..afd38142b1c0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -132,6 +132,14 @@ enum {
UFSHCD_CAN_QUEUE = 32,
};
+static const char *const ufshcd_state_name[] = {
+ [UFSHCD_STATE_RESET] = "reset",
+ [UFSHCD_STATE_OPERATIONAL] = "operational",
+ [UFSHCD_STATE_ERROR] = "error",
+ [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
+ [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
+};
+
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
@@ -236,7 +244,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
-static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -711,7 +718,7 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -2323,6 +2330,9 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
+ if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
+ return 0;
+
ufshcd_hold(hba, false);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2367,17 +2377,24 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
sizeof(struct ufshcd_sg_entry)));
else
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((u16) (sg_segments));
+ cpu_to_le16(sg_segments);
- prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+ prd_table = lrbp->ucd_prdt_ptr;
scsi_for_each_sg(cmd, sg, sg_segments, i) {
- prd_table[i].size =
- cpu_to_le32(((u32) sg_dma_len(sg))-1);
- prd_table[i].base_addr =
- cpu_to_le32(lower_32_bits(sg->dma_address));
- prd_table[i].upper_addr =
- cpu_to_le32(upper_32_bits(sg->dma_address));
+ const unsigned int len = sg_dma_len(sg);
+
+ /*
+ * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
+ * based value that indicates the length, in bytes, of
+ * the data block. A maximum of length of 256KB may
+ * exist for any entry. Bits 1:0 of this field shall be
+ * 11b to indicate Dword granularity. A value of '3'
+ * indicates 4 bytes, '7' indicates 8 bytes, etc."
+ */
+ WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
+ prd_table[i].size = cpu_to_le32(len - 1);
+ prd_table[i].addr = cpu_to_le64(sg->dma_address);
prd_table[i].reserved = 0;
}
} else {
@@ -2661,7 +2678,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -5084,7 +5101,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
int result = 0;
int scsi_status;
- int ocs;
+ enum utp_ocs ocs;
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp);
@@ -5243,11 +5260,9 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
* @completed_reqs: bitmask that indicates which requests to complete
- * @retry_requests: whether to ask the SCSI core to retry completed requests
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs,
- bool retry_requests)
+ unsigned long completed_reqs)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
@@ -5263,8 +5278,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = retry_requests ? DID_BUS_BUSY << 16 :
- ufshcd_transfer_rsp_status(hba, lrbp);
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
/* Mark completed command as NULL in LRB */
@@ -5290,14 +5304,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
- * @retry_requests: whether or not to ask to retry requests
*
* Returns
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
- bool retry_requests)
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
unsigned long completed_reqs, flags;
u32 tr_doorbell;
@@ -5326,8 +5338,7 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba,
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs,
- retry_requests);
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
return IRQ_HANDLED;
} else {
return IRQ_NONE;
@@ -5826,13 +5837,7 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
- ufshcd_tmc_handler(hba);
-}
-
-static void ufshcd_retry_aborted_requests(struct ufs_hba *hba)
-{
- ufshcd_transfer_req_compl(hba, /*retry_requests=*/true);
+ ufshcd_transfer_req_compl(hba);
ufshcd_tmc_handler(hba);
}
@@ -5914,9 +5919,10 @@ static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
(hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}
-/* host lock must be held before calling this func */
-static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
+ lockdep_assert_held(hba->host->host_lock);
+
/* handle fatal errors only when link is not in error state */
if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
if (hba->force_reset || ufshcd_is_link_broken(hba) ||
@@ -6076,6 +6082,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
+ dev_info(hba->dev,
+ "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
+ __func__, ufshcd_state_name[hba->ufshcd_state],
+ hba->is_powered, hba->shutting_down, hba->saved_err,
+ hba->saved_uic_err, hba->force_reset,
+ ufshcd_is_link_broken(hba) ? "; link is broken" : "");
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
@@ -6170,6 +6183,8 @@ again:
err_xfer = true;
goto lock_skip_pending_xfer_clear;
}
+ dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
}
/* Clear pending task management requests */
@@ -6181,7 +6196,8 @@ again:
}
lock_skip_pending_xfer_clear:
- ufshcd_retry_aborted_requests(hba);
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
@@ -6249,6 +6265,9 @@ skip_err_handling:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_unprepare(hba);
up(&hba->host_sem);
+
+ dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
+ ufshcd_state_name[hba->ufshcd_state]);
}
/**
@@ -6473,7 +6492,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false);
+ retval |= ufshcd_transfer_req_compl(hba);
return retval;
}
@@ -6545,6 +6564,10 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
mask, 0, 1000, 1000);
+
+ dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
+ tag, err ? "failed" : "succeeded");
+
out:
return err;
}
@@ -6637,7 +6660,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
u8 tm_function, u8 *tm_response)
{
struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value, err;
+ enum utp_ocs ocs_value;
+ int err;
/* Configure task request descriptor */
treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
@@ -6815,7 +6839,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int err;
enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { { 0 }, };
- int ocs_value;
+ enum utp_ocs ocs_value;
u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
switch (msgcode) {
@@ -6893,7 +6917,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, 1U << pos, false);
+ __ufshcd_transfer_req_compl(hba, 1U << pos);
}
}
@@ -7055,7 +7079,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
- __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false);
+ __ufshcd_transfer_req_compl(hba, 1UL << tag);
goto release;
}
@@ -7121,7 +7145,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshpb_reset_host(hba);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
- ufshcd_retry_aborted_requests(hba);
+ ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
@@ -8002,6 +8026,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret)
goto out;
+ if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
+ goto out;
+
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -9772,6 +9799,11 @@ static int __init ufshcd_core_init(void)
{
int ret;
+ /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
+ static_assert(sizeof(struct utp_transfer_cmd_desc) ==
+ 2 * ALIGNED_UPIU_SIZE +
+ SG_ALL * sizeof(struct ufshcd_sg_entry));
+
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
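The Data Byte Count comment above is easiest to check with numbers. A toy userspace model (plain integers; the kernel struct uses __le64/__le32 and cpu_to_le*() on the way out): a 4 KiB segment encodes as 0xFFF, whose low two bits are 11b as the spec demands.

    #include <assert.h>
    #include <stdint.h>

    struct sg_entry {                /* mirrors ufshcd_sg_entry's shape */
            uint64_t addr;           /* DW-0/1: 64-bit segment address */
            uint32_t reserved;       /* DW-2 */
            uint32_t size;           /* DW-3: Data Byte Count, 0-based */
    };

    int main(void)
    {
            uint32_t len = 4096;     /* assumed Dword-aligned segment */
            struct sg_entry e = { .addr = 0x80001000, .size = len - 1 };

            assert((e.size & 0x3) == 0x3);   /* 11b => Dword granularity */
            assert(len <= 256 * 1024);       /* per-entry ceiling */
            return 0;
    }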
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index dd765863b05f..54750d72c8fb 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -589,6 +589,18 @@ enum ufshcd_quirks {
* This quirk allows only sg entries aligned with page size.
*/
UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
+
+ /*
+ * This quirk needs to be enabled if the host controller does not
+ * support UIC commands
+ */
+ UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
+
+ /*
+ * This quirk needs to be enabled if the host controller cannot
+ * support physical host configuration.
+ */
+ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
};
enum ufshcd_caps {
@@ -1023,6 +1035,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
+void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static inline void check_upiu_size(void)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index de95be5d11d4..6a295c88d850 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -389,7 +389,7 @@ enum {
};
/* Overall command status values */
-enum {
+enum utp_ocs {
OCS_SUCCESS = 0x0,
OCS_INVALID_CMD_TABLE_ATTR = 0x1,
OCS_INVALID_PRDT_ATTR = 0x2,
@@ -402,6 +402,9 @@ enum {
OCS_INVALID_CRYPTO_CONFIG = 0x9,
OCS_GENERAL_CRYPTO_ERROR = 0xA,
OCS_INVALID_COMMAND_STATUS = 0x0F,
+};
+
+enum {
MASK_OCS = 0x0F,
};
@@ -412,20 +415,18 @@ enum {
/**
* struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @base_addr: Lower 32bit physical address DW-0
- * @upper_addr: Upper 32bit physical address DW-1
+ * @addr: Physical address; DW-0 and DW-1.
* @reserved: Reserved for future use DW-2
* @size: size of physical segment DW-3
*/
struct ufshcd_sg_entry {
- __le32 base_addr;
- __le32 upper_addr;
+ __le64 addr;
__le32 reserved;
__le32 size;
};
/**
- * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
+ * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
* @command_upiu: Command UPIU Frame address
* @response_upiu: Response UPIU Frame address
* @prd_table: Physical Region Descriptor
@@ -451,7 +452,7 @@ struct request_desc_header {
};
/**
- * struct utp_transfer_req_desc - UTRD structure
+ * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
* @header: UTRD header DW-0 to DW-3
* @command_desc_base_addr_lo: UCD base address low DW-4
* @command_desc_base_addr_hi: UCD base address high DW-5
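The static_assert added to ufshcd_core_init() pairs with the ufshcd_sg_entry change here: once addr is a single __le64, the struct must stay gap-free or the PRDT the hardware walks and the struct the driver fills diverge. A compile-only toy with made-up sizes (UPIU_SZ/NSEG are illustrative, not the kernel's ALIGNED_UPIU_SIZE/SG_ALL):

    #include <stdint.h>

    struct prd { uint64_t addr; uint32_t rsvd; uint32_t size; };

    #define UPIU_SZ 512
    #define NSEG    128

    struct ucd {
            uint8_t    command_upiu[UPIU_SZ];
            uint8_t    response_upiu[UPIU_SZ];
            struct prd prd_table[NSEG];
    };

    /* Any compiler-inserted padding fails the build, not the DMA. */
    _Static_assert(sizeof(struct ucd) ==
                   2 * UPIU_SZ + NSEG * sizeof(struct prd),
                   "unexpected padding in struct ucd");

    int main(void) { return 0; }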
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index f4eca441b433..2e31e1413826 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -394,8 +394,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
if (!ufshpb_is_supported_chunk(hpb, transfer_len))
return 0;
- WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
-
if (hpb->is_hcm) {
/*
* in host control mode, reads are the main source for
@@ -1572,7 +1570,7 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
if (ufshpb_is_legacy(hba))
hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
else
- hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
+ hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
hpb->lu_pinned_start = hpb_lu_info->pinned_start;
hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
@@ -2582,7 +2580,7 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
{
struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
int version, ret;
- u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
+ int max_single_cmd;
hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
@@ -2598,18 +2596,22 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
if (version == HPB_SUPPORT_LEGACY_VERSION)
hpb_dev_info->is_legacy = true;
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
- if (ret)
- dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
- __func__);
- hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
-
/*
* Get the number of user logical unit to check whether all
* scsi_device finish initialization
*/
hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
+
+ if (hpb_dev_info->is_legacy)
+ return;
+
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
+
+ if (ret)
+ hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
+ else
+ hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
}
void ufshpb_init(struct ufs_hba *hba)
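The ufshpb.c change above drops the fixed HPB_MULTI_CHUNK_LOW fallback in favor of a value derived from the device's QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD attribute, with the reported value bumped by one and clamped to HPB_MULTI_CHUNK_HIGH. A small model of that derivation (function name hypothetical):

    #include <stdio.h>

    #define HPB_LEGACY_CHUNK_HIGH 1
    #define HPB_MULTI_CHUNK_HIGH  255

    static int hpb_max_single_cmd(int query_ret, int attr_val)
    {
            if (query_ret)           /* query failed: conservative floor */
                    return HPB_LEGACY_CHUNK_HIGH;
            return attr_val + 1 < HPB_MULTI_CHUNK_HIGH ?
                   attr_val + 1 : HPB_MULTI_CHUNK_HIGH;  /* min(v+1, 255) */
    }

    int main(void)
    {
            printf("%d %d %d\n", hpb_max_single_cmd(0, 254),   /* 255 */
                   hpb_max_single_cmd(0, 3),                   /* 4 */
                   hpb_max_single_cmd(-22, 99));               /* 1 */
            return 0;
    }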
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index f15d8fdbce2e..b475dbd78988 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -31,7 +31,6 @@
/* hpb support chunk size */
#define HPB_LEGACY_CHUNK_HIGH 1
-#define HPB_MULTI_CHUNK_LOW 7
#define HPB_MULTI_CHUNK_HIGH 255
/* hpb vender defined opcode */