Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/core/Makefile                     |   1
-rw-r--r--  drivers/mmc/core/block.c                      | 346
-rw-r--r--  drivers/mmc/core/block.h                      |   1
-rw-r--r--  drivers/mmc/core/bus.c                        |   7
-rw-r--r--  drivers/mmc/core/core.c                       | 262
-rw-r--r--  drivers/mmc/core/core.h                       |  16
-rw-r--r--  drivers/mmc/core/host.c                       |  20
-rw-r--r--  drivers/mmc/core/host.h                       |   7
-rw-r--r--  drivers/mmc/core/mmc.c                        |  46
-rw-r--r--  drivers/mmc/core/mmc_ops.c                    |   6
-rw-r--r--  drivers/mmc/core/queue.c                      |  41
-rw-r--r--  drivers/mmc/core/queue.h                      |   5
-rw-r--r--  drivers/mmc/core/quirks.h                     |   1
-rw-r--r--  drivers/mmc/core/sd.c                         |  51
-rw-r--r--  drivers/mmc/core/sd.h                         |   1
-rw-r--r--  drivers/mmc/core/sdio_irq.c                   |   3
-rw-r--r--  drivers/mmc/host/Kconfig                      |  28
-rw-r--r--  drivers/mmc/host/Makefile                     |   3
-rw-r--r--  drivers/mmc/host/atmel-mci.c                  |  13
-rw-r--r--  drivers/mmc/host/cavium.c                     |   2
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c                  |   2
-rw-r--r--  drivers/mmc/host/dw_mmc-zx.h                  |   1
-rw-r--r--  drivers/mmc/host/dw_mmc.c                     | 191
-rw-r--r--  drivers/mmc/host/dw_mmc.h                     |   3
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c                 |   7
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c               |   2
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c              | 768
-rw-r--r--  drivers/mmc/host/mmci.c                       |   2
-rw-r--r--  drivers/mmc/host/mtk-sd.c                     | 285
-rw-r--r--  drivers/mmc/host/mvsdio.c                     |   6
-rw-r--r--  drivers/mmc/host/mxcmmc.c                     |  11
-rw-r--r--  drivers/mmc/host/omap.c                       |  20
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c                 |  35
-rw-r--r--  drivers/mmc/host/pxamci.h                     |   1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c |  18
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c      |   5
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c             |  38
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c                 | 174
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c              |  28
-rw-r--r--  drivers/mmc/host/sdhci-msm.c                  | 326
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c              |   3
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c             |  58
-rw-r--r--  drivers/mmc/host/sdhci-omap.c                 | 607
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c             |  13
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c          |  35
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.h          |  73
-rw-r--r--  drivers/mmc/host/sdhci-pci.h                  |  14
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c                  |  18
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c                |  10
-rw-r--r--  drivers/mmc/host/sdhci.c                      |  15
-rw-r--r--  drivers/mmc/host/sdhci_f_sdh30.c              |  14
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c                  |   5
-rw-r--r--  drivers/mmc/host/tifm_sd.c                    |   6
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c              |  49
-rw-r--r--  drivers/mmc/host/usdhi6rol0.c                 |   2
-rw-r--r--  drivers/mmc/host/via-sdmmc.c                  |   8
-rw-r--r--  drivers/mmc/host/vub300.c                     |  41
-rw-r--r--  drivers/mmc/host/wbsd.c                       |   8
58 files changed, 3180 insertions, 582 deletions
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 7e3ed1aeada2..abba078f7f49 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel mmc core.
#
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2ad7b5c69156..ea80ff4cd7f9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
+#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -86,6 +87,7 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
/*
* There is one mmc_blk_data per slot.
@@ -96,6 +98,7 @@ struct mmc_blk_data {
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
+ struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
@@ -121,6 +124,32 @@ struct mmc_blk_data {
int area_type;
};
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 on first)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
@@ -299,6 +328,7 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
@@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- bool is_rpmb = false;
+ unsigned int target_part;
u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
+ /*
+ * RPMB accesses come in from the character device, so we
+ * need to target these explicitly. Otherwise we just target the
+ * partition type of the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
@@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
- err = mmc_blk_part_switch(card, md->part_type);
+ err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
+ if (idata->rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
@@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ if (idata->rpmb) {
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_cmd __user *ic_ptr)
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
@@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
+ /* This will be NULL on non-RPMB ioctl():s */
+ idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
@@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
idatas[0] = idata;
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -596,7 +641,8 @@ cmd_done:
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_multi_cmd __user *user)
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
@@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
num_of_cmds = i;
goto cmd_err;
}
+ /* This will be NULL on non-RPMB ioctl():s */
+ idata[i]->rpmb = rpmb;
}
card = md->queue.card;
@@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
- (struct mmc_ioc_cmd __user *)arg);
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
@@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
- (struct mmc_ioc_multi_cmd __user *)arg);
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
default:
@@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
- struct mmc_blk_data *md = mq->blkdata;
- /*
- * If this is a RPMB partition access, return ture
- */
- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
- return true;
-
- return false;
-}
-
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
/* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, main_md->part_type);
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
/*
* The block layer doesn't support all sector count
@@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req && !mq->qcnt)
/* claim host only for the first request */
- mmc_get_card(card);
+ mmc_get_card(card, NULL);
ret = mmc_blk_part_switch(card, md->part_type);
if (ret) {
@@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if (!mq->qcnt)
- mmc_put_card(card);
+ mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
@@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
return 0;
}
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This will essentially just redirect the ioctl()s coming in over to
+ * the main block device spawning the RPMB character device.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ put_device(&rpmb->dev);
+ mmc_blk_put(rpmb->md);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
/* MMC Physical partitions consist of two boot partitions and
* up to four general purpose partitions.
* For each partition enabled in EXT_CSD a block device will be allocated
@@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int idx, ret = 0;
+ int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
- if (card->part[idx].size) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+ * RPMB partitions do not provide block access, they
+ * are only accessed using ioctl():s. Thus create
+ * special RPMB character devices that do not have a
+ * backing block queue for these.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
@@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
}
}
- return ret;
+ return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
@@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
@@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void)
{
int res;
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
@@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void)
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
- goto out;
+ goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
- goto out2;
+ goto out_blkdev_unreg;
return 0;
- out2:
+
+out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
return res;
}
@@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
}
module_init(mmc_blk_init);
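For context, a minimal userspace sketch of how the RPMB area is reached after this conversion: the MMC_IOC_CMD/MMC_IOC_MULTI_CMD interface is unchanged, but it is now issued against the new character device (for example /dev/mmcblk0rpmb) rather than an RPMB block device. The device path, the rpmb_xfer_frame() helper and the locally defined response-flag macros (copied from the kernel's values, since the UAPI header does not export them) are illustrative assumptions, not part of the patch; a real RPMB exchange additionally wraps each 512-byte frame in the request/response types defined by the eMMC specification.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC	(1 << 5)

/* Move one 512-byte RPMB frame to or from the card via the new chardev */
static int rpmb_xfer_frame(const char *devnode, uint8_t frame[512], int write)
{
	struct mmc_ioc_cmd idata;
	int fd, ret;

	fd = open(devnode, O_RDWR);		/* e.g. "/dev/mmcblk0rpmb" */
	if (fd < 0)
		return -1;

	memset(&idata, 0, sizeof(idata));
	idata.opcode = write ? 25 : 18;		/* CMD25 = write, CMD18 = read */
	idata.write_flag = write;		/* program requests also set bit 31 (reliable write) */
	idata.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	idata.blksz = 512;
	idata.blocks = 1;
	mmc_ioc_cmd_set_data(idata, frame);

	/* The chardev routes this to the RPMB partition (MMC_DRV_OP_IOCTL_RPMB) */
	ret = ioctl(fd, MMC_IOC_CMD, &idata);
	close(fd);
	return ret;
}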
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 860ca7c8df86..5946636101ef 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMC_CORE_BLOCK_H
#define _MMC_CORE_BLOCK_H
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 301246513a37..a4b49e25fe96 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
+
#ifdef CONFIG_DEBUG_FS
mmc_remove_card_debugfs(card);
#endif
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+EXPORT_SYMBOL(mmc_start_request);
/*
* mmc_wait_data_done() - done callback for data request
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning if needed and it is possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION,
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
}
EXPORT_SYMBOL(mmc_align_data_size);
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
+
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
{
+ struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed || host->claimer == current)
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
- host->claimer = current;
+ mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
+ host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
- mmc_claim_host(card->host);
+ __mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
- mmc_release_host(card->host);
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#endif /* CONFIG_REGULATOR */
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or errno. errno should be handled, it is either a critical error
+ * or -EPROBE_DEFER. 0 means no critical error but it does not mean all
+ * regulators have been found because they all are optional. If you require
+ * certain regulators, you need to check separately in your driver if they got
+ * populated after calling this function.
+ */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
- u32 clock;
/*
* If we cannot switch voltages, return failure so the caller
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EAGAIN;
goto power_cycle;
}
- /*
- * During a signal voltage level switch, the clock must be gated
- * for 5 ms according to the SD spec
- */
- clock = host->ios.clock;
- host->ios.clock = 0;
- mmc_set_ios(host);
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+ if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
- mmc_delay(10);
- host->ios.clock = clock;
- mmc_set_ios(host);
-
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
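As a rough sketch of how the new CQE hooks divide the work: the core starts transfers with mmc_cqe_start_req(), and the host driver is expected to hand every finished request back through mmc_cqe_request_done(), which flags re-tuning on CRC errors and invokes mrq->done(). Everything prefixed my_ below is hypothetical; only the mmc_cqe_*() helpers and cqe_ops hooks come from this patch.

#include <linux/interrupt.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Hypothetical helper: a real driver would read its task-completion
 * registers here and return the next finished request, or NULL.
 */
static struct mmc_request *my_cqe_pop_completed(struct mmc_host *host)
{
	return NULL;	/* stub for the sketch */
}

static irqreturn_t my_cqe_irq_thread(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;
	struct mmc_request *mrq;

	/* Hand each finished task back to the core */
	while ((mrq = my_cqe_pop_completed(host)))
		mmc_cqe_request_done(host, mrq);

	return IRQ_HANDLED;
}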
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca861091a776..71e6c6d7ceb7 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
@@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
struct mmc_async_req;
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
@@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
void mmc_release_host(struct mmc_host *host);
-void mmc_get_card(struct mmc_card *card);
-void mmc_put_card(struct mmc_card *card);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
/**
* mmc_claim_host - exclusively claim a host
@@ -141,7 +145,11 @@ void mmc_put_card(struct mmc_card *card);
*/
static inline void mmc_claim_host(struct mmc_host *host)
{
- __mmc_claim_host(host, NULL);
+ __mmc_claim_host(host, NULL, NULL);
}
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
#endif
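The context parameter threaded through mmc_get_card()/mmc_put_card() and __mmc_claim_host() above lets a claim be owned by a struct mmc_ctx rather than by the current task, so different threads acting on behalf of the same context (for instance a block queue) can nest claims. A hedged sketch of the intended usage from inside the core; the my_queue structure and its names are placeholders:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "core.h"	/* mmc_get_card()/mmc_put_card() live in the core-private header */

struct my_queue {
	struct mmc_card *card;
	struct mmc_ctx ctx;	/* identifies the claiming context, not a task */
};

static void my_queue_issue(struct my_queue *q)
{
	/* Runtime-PM get + claim on behalf of q->ctx */
	mmc_get_card(q->card, &q->ctx);

	/* ... issue one or more requests ... */

	/* Release the claim; mmc_put_card() warns if another context held it */
	mmc_put_card(q->card, &q->ctx);
}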
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ad88deb2e8f3..35a9e4fd1a9f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host)
host->hold_retune += 1;
}
-void mmc_retune_hold_now(struct mmc_host *host)
-{
- host->retune_now = 0;
- host->hold_retune += 1;
-}
-
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
@@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host)
else
WARN_ON(1);
}
+EXPORT_SYMBOL(mmc_retune_release);
int mmc_retune(struct mmc_host *host)
{
@@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data)
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width;
+ u32 bus_width, drv_type;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
@@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
+ host->fixed_drv_type = -EINVAL;
+
return host;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 77d6f60d1bf9..fb689a1065ed 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -19,12 +19,17 @@ void mmc_unregister_host_class(void);
void mmc_retune_enable(struct mmc_host *host);
void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
-void mmc_retune_hold_now(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
static inline void mmc_retune_recheck(struct mmc_host *host)
{
if (host->hold_retune <= 1)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 36217ad5e9b1..a552f61060d2 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
@@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
@@ -1289,13 +1291,18 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
+ int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
card->drive_strength = drive_strength;
@@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
* disabled for a time, so a flag is needed to indicate to re-enable the
* Command Queue.
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
+ if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->cqe_enabled = true;
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 54686ca4bfb7..908e4db03535 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
from_exception)
return;
- mmc_claim_host(card->host);
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
timeout = MMC_OPS_TIMEOUT_MS;
use_busy_signal = true;
@@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
- goto out;
+ return;
}
/*
@@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_card_set_doing_bkops(card);
else
mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
}
+EXPORT_SYMBOL(mmc_start_bkops);
/*
* Flush the cache to the non-volatile storage.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ if (mq && mmc_card_removed(mq->card))
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ /* Initialize thread_sem even if it is not used */
+ sema_init(&mq->thread_sem, 1);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
int ret = -ENOMEM;
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
}
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
- sema_init(&mq->thread_sem, 1);
+ mmc_setup_queue(mq, card);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index f18d3f656baa..547b457c4251 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
@@ -35,12 +36,14 @@ struct mmc_blk_request {
/**
* enum mmc_drv_op - enumerates the operations in the mmc_queue_req
* @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
* @MMC_DRV_OP_BOOT_WP: write protect boot partitions
* @MMC_DRV_OP_GET_CARD_STATUS: get card status
* @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
*/
enum mmc_drv_op {
MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
MMC_DRV_OP_BOOT_WP,
MMC_DRV_OP_GET_CARD_STATUS,
MMC_DRV_OP_GET_EXT_CSD,
@@ -81,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern int mmc_access_rpmb(struct mmc_queue *);
-
#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index fb725934fa21..f664e9cbc9f8 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains work-arounds for many known SD/MMC
* and SDIO hardware bugs.
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4fd1620b732d..45bf78f32716 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
int err;
u32 cid[4];
u32 rocr = 0;
+ bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
-
+retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ /*
+ * Re-read switch information in case it has changed since
+ * oldcard was initialized.
+ */
+ if (oldcard) {
+ err = mmc_read_switch(card);
+ if (err)
+ goto free_card;
+ }
+ if (mmc_sd_card_using_v18(card)) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto done;
+ }
+ }
+
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A) {
err = mmc_sd_init_uhs_card(card);
@@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+done:
host->card = card;
return 0;
@@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 1ada9808c329..497c026a5c5a 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMC_CORE_SD_H
#define _MMC_CORE_SD_H
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c771843e4c15..7a2eaf8410a3 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host)
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
- ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c15637178ff..567028c9219a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -352,6 +352,19 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDIO
+ tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on HAS_DMA
+ depends on OF
+ help
+ This selects support for the SD/MMC Host Controller on
+ Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -429,6 +442,7 @@ config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON
config MMC_CAVIUM_THUNDERX
tristate "Cavium ThunderX SD/MMC Card Interface support"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- depends on GPIOLIB
+ depends on GPIO_THUNDERX
depends on OF_ADDRESS
help
This selects Cavium ThunderX SD/MMC Card Interface.
@@ -899,3 +913,15 @@ config MMC_SDHCI_XENON
This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
+ depends on MMC_SDHCI_PLTFM && OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in TI's DRA7 SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 303f5cd46cd9..a43cf0d5a5d3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for MMC/SD host controller drivers
#
@@ -64,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
+obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0a0ebf3a096d..e55f3932d580 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
return 0;
}
-static void atmci_timeout_timer(unsigned long data)
+static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
- host = (struct atmel_mci *)data;
+ host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
@@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = 0;
}
-static void atmci_detect_change(unsigned long data)
+static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
@@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host,
if (gpio_is_valid(slot->detect_pin)) {
int ret;
- setup_timer(&slot->detect_timer, atmci_detect_change,
- (unsigned long)slot);
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
@@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
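The atmel-mci hunks above follow the tree-wide timer API conversion: timer callbacks now receive the struct timer_list pointer and recover their containing object with from_timer(), replacing the old unsigned long cookie passed to setup_timer(). A generic sketch of the pattern (struct foo and its names are placeholders, not taken from this driver):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	/* ... driver state ... */
};

/* New-style callback: the timer itself is the argument */
static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);	/* container_of() on the timer field */

	/* handle the timeout for f */
}

static void foo_start(struct foo *f)
{
	/* Replaces setup_timer(&f->timer, foo_timeout, (unsigned long)f) */
	timer_setup(&f->timer, foo_timeout, 0);
	mod_timer(&f->timer, jiffies + msecs_to_jiffies(100));
}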
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index fbd29f00fca0..ed5cefb83768 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..73fd75c3c824 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -75,7 +75,7 @@ struct hs_timing {
u32 smpl_phase_min;
};
-struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
{ /* reserved */ },
{ /* SD */
{7, 0, 15, 15,}, /* 0: LEGACY 400k */
diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h
index f369997a39ec..09ac52766f14 100644
--- a/drivers/mmc/host/dw_mmc-zx.h
+++ b/drivers/mmc/host/dw_mmc-zx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DW_MMC_ZX_H_
#define _DW_MMC_ZX_H_
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 860313bd952a..0aa39975f33b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -401,16 +401,37 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
static inline void dw_mci_set_cto(struct dw_mci *host)
{
unsigned int cto_clks;
+ unsigned int cto_div;
unsigned int cto_ms;
+ unsigned long irqflags;
cto_clks = mci_readl(host, TMOUT) & 0xff;
- cto_ms = DIV_ROUND_UP(cto_clks, host->bus_hz / 1000);
+ cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+ if (cto_div == 0)
+ cto_div = 1;
+ cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz);
/* add a bit spare time */
cto_ms += 10;
- mod_timer(&host->cto_timer,
- jiffies + msecs_to_jiffies(cto_ms) + 1);
+ /*
+ * The durations we're working with are fairly short so we have to be
+ * extra careful about synchronization here. Specifically in hardware a
+ * command timeout is _at most_ 5.1 ms, so that means we expect an
+ * interrupt (either command done or timeout) to come rather quickly
+ * after the mci_writel. ...but just in case we have a long interrupt
+ * latency let's add a bit of paranoia.
+ *
+ * In general we'll assume that at least an interrupt will be asserted
+ * in hardware by the time the cto_timer runs. ...and if it hasn't
+ * been asserted in hardware by that time then we'll assume it'll never
+ * come.
+ */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+ mod_timer(&host->cto_timer,
+ jiffies + msecs_to_jiffies(cto_ms) + 1);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static void dw_mci_start_command(struct dw_mci *host,
@@ -425,11 +446,11 @@ static void dw_mci_start_command(struct dw_mci *host,
wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+
/* response expected command only */
if (cmd_flags & SDMMC_CMD_RESP_EXP)
dw_mci_set_cto(host);
-
- mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
@@ -796,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
@@ -1003,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
@@ -1915,15 +1936,55 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_set_drto(struct dw_mci *host)
{
unsigned int drto_clks;
+ unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
- drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+ drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+ if (drto_div == 0)
+ drto_div = 1;
+ drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
+ host->bus_hz);
/* add a bit of spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
+
+static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+ return false;
+
+ /*
+ * Really be certain that the timer has stopped. This is a bit of
+ * paranoia and could only really happen if we had really bad
+ * interrupt latency and the interrupt routine and timeout were
+ * running concurrently so that the del_timer() in the interrupt
+ * handler couldn't run.
+ */
+ WARN_ON(del_timer_sync(&host->cto_timer));
+ clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
}
static void dw_mci_tasklet_func(unsigned long priv)
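The two helpers above pair with the interrupt-handler hunks further down: the IRQ path sets EVENT_CMD_COMPLETE/EVENT_DATA_COMPLETE and deletes the timer while holding host->irq_lock, the arming paths only start the timer if the bit is not already set under the same lock, and the tasklet consumes the bit with del_timer_sync() outside the lock. A minimal sketch of that pattern, with hypothetical names (lock, pending, EVENT_DONE, timeout_timer, timeout_ms) and assuming the usual <linux/timer.h>, <linux/spinlock.h> and <linux/bitops.h> helpers:

    /* IRQ handler: record completion and kill the not-yet-fired timer */
    spin_lock_irqsave(&lock, flags);
    del_timer(&timeout_timer);
    set_bit(EVENT_DONE, &pending);
    spin_unlock_irqrestore(&lock, flags);

    /* submit path: only arm the timer if completion hasn't happened yet */
    spin_lock_irqsave(&lock, flags);
    if (!test_bit(EVENT_DONE, &pending))
        mod_timer(&timeout_timer, jiffies + msecs_to_jiffies(timeout_ms));
    spin_unlock_irqrestore(&lock, flags);

    /* tasklet: consume the event; the timer must be fully stopped by now */
    if (test_bit(EVENT_DONE, &pending)) {
        WARN_ON(del_timer_sync(&timeout_timer));
        clear_bit(EVENT_DONE, &pending);
    }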
@@ -1952,8 +2013,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
- if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
- &host->pending_events))
+ if (!dw_mci_clear_pending_cmd_complete(host))
break;
cmd = host->cmd;
@@ -2068,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2122,8 +2181,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_SENDING_STOP:
- if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
- &host->pending_events))
+ if (!dw_mci_clear_pending_cmd_complete(host))
break;
/* CMD error in data command */
@@ -2570,6 +2628,8 @@ done:
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
+ del_timer(&host->cto_timer);
+
if (!host->cmd_status)
host->cmd_status = status;
@@ -2594,6 +2654,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
struct dw_mci *host = dev_id;
u32 pending;
struct dw_mci_slot *slot = host->slot;
+ unsigned long irqflags;
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
@@ -2601,8 +2662,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
/* Check volt switch first, since it can look like an error */
if ((host->state == STATE_SENDING_CMD11) &&
(pending & SDMMC_INT_VOLT_SWITCH)) {
- unsigned long irqflags;
-
mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
pending &= ~SDMMC_INT_VOLT_SWITCH;
@@ -2618,11 +2677,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->cto_timer);
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -2635,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2647,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -2662,9 +2729,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_CMD_DONE) {
- del_timer(&host->cto_timer);
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
dw_mci_cmd_interrupt(host, pending);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_CD) {
@@ -2741,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto err_host_allocated;
if (!mmc->ocr_avail)
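This hunk repeats across several hosts in the series (dw_mmc here, then meson-gx, mmci, mtk-sd and mxcmmc below): probe now bails out on any error from mmc_regulator_get_supply(), not only on -EPROBE_DEFER. A minimal sketch of the resulting probe snippet, with a hypothetical error label and a fallback OCR mask chosen only for illustration:

    ret = mmc_regulator_get_supply(mmc);
    if (ret)                /* any error aborts probe, not just -EPROBE_DEFER */
        goto err_free_host;

    if (!mmc->ocr_avail)    /* no regulators described: use a fixed OCR mask */
        mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;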
@@ -2921,9 +2991,9 @@ no_dma:
host->use_dma = TRANS_MODE_PIO;
}
-static void dw_mci_cmd11_timer(unsigned long arg)
+static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2935,10 +3005,38 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
-static void dw_mci_cto_timer(unsigned long arg)
+static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /*
+ * If somehow we have very bad interrupt latency it's remotely possible
+ * that the timer could fire while the interrupt is still pending or
+ * while the interrupt is midway through running. Let's be paranoid
+ * and detect those two cases. Note that this paranoia is somewhat
+ * justified because in this function we don't actually cancel the
+ * pending command in the controller--we just assume it will never come.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "CTO timeout when already completed\n");
+ goto exit;
+ }
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
@@ -2957,12 +3055,39 @@ static void dw_mci_cto_timer(unsigned long arg)
host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_dto_timer(unsigned long arg)
+static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, dto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -2977,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
@@ -3127,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
-
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
-
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
+ timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
+ timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
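The timer conversions here (and in jz4740, mvsdio, mxcmmc and omap further down) all follow the same kernel API change: setup_timer(timer, fn, (unsigned long)data) becomes timer_setup(timer, fn, flags), and the callback takes a struct timer_list * and recovers its container with from_timer() instead of casting an unsigned long. A minimal sketch with hypothetical names:

    struct my_host {
        struct timer_list timeout_timer;
        /* ... */
    };

    static void my_timeout(struct timer_list *t)
    {
        struct my_host *host = from_timer(host, t, timeout_timer);

        /* handle the timeout using host */
    }

    /* in probe; there is no longer an (unsigned long) data argument */
    timer_setup(&host->timeout_timer, my_timeout, 0);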
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..e3124f06a47e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -74,7 +74,8 @@ struct dw_mci_dma_slave {
* @stop_abort: The command currently prepared for stopping transfer.
* @prev_blksz: The former transfer blksz record.
* @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
+ * @use_dma: Which DMA channel is in use for the current transfer, zero
+ * denotes PIO mode.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7db8c7a8d38d..712e08d9a45e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -586,9 +586,9 @@ poll_timeout:
return true;
}
-static void jz4740_mmc_timeout(unsigned long data)
+static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+ struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_reset(host);
jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
+ timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 85745ef179e2..e0862d3f65b3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto free_host;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
new file mode 100644
index 000000000000..09cb89645d06
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -0,0 +1,768 @@
+/*
+ * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define MESON_MX_SDIO_ARGU 0x00
+
+#define MESON_MX_SDIO_SEND 0x04
+ #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
+ #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
+ #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
+ #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
+ #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
+ #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
+ #define MESON_MX_SDIO_SEND_DATA BIT(20)
+ #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
+ #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
+
+#define MESON_MX_SDIO_CONF 0x08
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
+ #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
+ #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
+ #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
+ #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
+ #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
+ #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
+ #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
+ #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
+ #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
+
+#define MESON_MX_SDIO_IRQS 0x0c
+ #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
+ #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
+ #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
+ #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
+ #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
+ #define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
+ #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
+ #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
+ #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
+ #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
+
+#define MESON_MX_SDIO_IRQC 0x10
+ #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
+ #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+
+#define MESON_MX_SDIO_MULT 0x14
+ #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
+ #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
+ #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
+ #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
+ #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
+ #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
+ #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
+
+#define MESON_MX_SDIO_ADDR 0x18
+
+#define MESON_MX_SDIO_EXT 0x1c
+ #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
+
+#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
+#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
+#define MESON_MX_SDIO_MAX_SLOTS 3
+
+struct meson_mx_mmc_host {
+ struct device *controller_dev;
+
+ struct clk *parent_clk;
+ struct clk *core_clk;
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+ struct clk_fixed_factor fixed_factor;
+ struct clk *fixed_factor_clk;
+
+ void __iomem *base;
+ int irq;
+ spinlock_t irq_lock;
+
+ struct timer_list cmd_timeout;
+
+ unsigned int slot_id;
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+};
+
+static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
+ u32 val)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 regval;
+
+ regval = readl(host->base + reg);
+ regval &= ~mask;
+ regval |= (val & mask);
+
+ writel(regval, host->base + reg);
+}
+
+static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
+{
+ writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ udelay(2);
+}
+
+static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+ return cmd->mrq->cmd;
+ else if (mmc_op_multi(cmd->opcode) &&
+ (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+ return cmd->mrq->stop;
+ else
+ return NULL;
+}
+
+static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned int pack_size;
+ unsigned long irqflags, timeout;
+ u32 mult, send = 0, ext = 0;
+
+ host->cmd = cmd;
+
+ if (cmd->busy_timeout)
+ timeout = msecs_to_jiffies(cmd->busy_timeout);
+ else
+ timeout = msecs_to_jiffies(1000);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
+ break;
+ case MMC_RSP_R2:
+ /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
+ send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
+ break;
+ default:
+ break;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
+
+ if (cmd->data) {
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ (cmd->data->blocks - 1));
+
+ pack_size = cmd->data->blksz * BITS_PER_BYTE;
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
+ else
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
+
+ ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ pack_size);
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_MX_SDIO_SEND_DATA;
+ else
+ send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
+
+ cmd->data->bytes_xfered = 0;
+ }
+
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
+ (0x40 | cmd->opcode));
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
+ mult |= BIT(31);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ /* enable the CMD done interrupt */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+
+ /* clear pending interrupts */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT,
+ MESON_MX_SDIO_IRQS_CMD_INT);
+
+ writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
+ writel(ext, host->base + MESON_MX_SDIO_EXT);
+ writel(send, host->base + MESON_MX_SDIO_SEND);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ mod_timer(&host->cmd_timeout, jiffies + timeout);
+}
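Restating the arithmetic already encoded in the constants above: the value programmed into SEND_CMD_RESP_BITS is the CMD + payload + CRC bit count minus one, and the per-block transfer size adds 15 CRC16 bits per active data line:

    R1/R1b/R3:  7 (CMD) + 32 (payload)  + 7 (CRC) - 1 = 45 bits
    R2:         7 (CMD) + 120 (payload) + 7 (CRC) - 1 = 133 bits
    pack_size:  blksz * 8, plus 15 * 4 (4-bit bus) or 15 * 1 (1-bit bus)
                e.g. blksz = 512 on a 4-bit bus -> 4096 + 60 = 4156 bits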
+
+static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
+{
+ struct mmc_request *mrq;
+
+ mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+ unsigned long clk_rate = ios->clock;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+
+ host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
+ if (host->error) {
+ dev_warn(mmc_dev(mmc),
+ "failed to set MMC clock to %lu: %d\n",
+ clk_rate, host->error);
+ return;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ /* fall-through: */
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+ break;
+ }
+}
+
+static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+ struct scatterlist *sg;
+
+ if (!data)
+ return 0;
+
+ sg = data->sg;
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(mmc),
+ "unaligned scatterlist: offset %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_mmc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ writel(sg_dma_address(mrq->data->sg),
+ host->base + MESON_MX_SDIO_ADDR);
+
+ if (mrq->sbc)
+ meson_mx_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mx_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+
+ return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
+}
+
+static void meson_mx_mmc_read_response(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 mult;
+ int i, resp[4];
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
+ mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i <= 3; i++)
+ resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
+ cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
+ cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
+ cmd->resp[3] = (resp[3] << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ }
+}
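The shifting in the MMC_RSP_136 branch above re-aligns the four raw words by one byte: each cmd->resp[] word takes the low 24 bits of one hardware word plus the top byte of the next. With illustrative register values, chosen only to show the byte movement:

    resp[0] = 0xAABBCCDD, resp[1] = 0x11223344
    cmd->resp[0] = (0xAABBCCDD << 8) | ((0x11223344 >> 24) & 0xff) = 0xBBCCDD11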
+
+static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
+ u32 irqs, u32 send)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ /*
+ * NOTE: even though it shouldn't happen, we sometimes get command
+ * interrupts twice (at least this is what it looks like). Ideally
+ * we should find out why this happens and warn here as soon as it occurs.
+ */
+ if (!cmd)
+ return IRQ_HANDLED;
+
+ cmd->error = 0;
+ meson_mx_mmc_read_response(host->mmc, cmd);
+
+ if (cmd->data) {
+ if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
+ (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
+ cmd->error = -EILSEQ;
+ } else {
+ if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
+ (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
+ cmd->error = -EILSEQ;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
+{
+ struct meson_mx_mmc_host *host = (void *) data;
+ u32 irqs, send;
+ unsigned long irqflags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ irqs = readl(host->base + MESON_MX_SDIO_IRQS);
+ send = readl(host->base + MESON_MX_SDIO_SEND);
+
+ if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
+ ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
+ else
+ ret = IRQ_HANDLED;
+
+ /* finally ACK all pending interrupts */
+ writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ return ret;
+}
+
+static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_mmc_host *host = (void *) irq_data;
+ struct mmc_command *cmd = host->cmd, *next_cmd;
+
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ del_timer_sync(&host->cmd_timeout);
+
+ if (cmd->data) {
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ next_cmd = meson_mx_mmc_get_next_cmd(cmd);
+ if (next_cmd)
+ meson_mx_mmc_start_cmd(host->mmc, next_cmd);
+ else
+ meson_mx_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_mmc_timeout(struct timer_list *t)
+{
+ struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ unsigned long irqflags;
+ u32 irqc;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* disable the CMD interrupt */
+ irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+ irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
+ writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ /*
+ * skip the timeout handling if the interrupt handler already processed
+ * the command.
+ */
+ if (!host->cmd)
+ return;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
+ host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
+ readl(host->base + MESON_MX_SDIO_ARGU));
+
+ host->cmd->error = -ETIMEDOUT;
+
+ meson_mx_mmc_request_done(host);
+}
+
+static struct mmc_host_ops meson_mx_mmc_ops = {
+ .request = meson_mx_mmc_request,
+ .set_ios = meson_mx_mmc_set_ios,
+ .card_busy = meson_mx_mmc_card_busy,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
+{
+ struct device_node *slot_node;
+
+ /*
+ * TODO: the MMC core framework currently does not support
+ * controllers with multiple slots properly. So we only register
+ * the first slot for now
+ */
+ slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ if (!slot_node) {
+ dev_warn(parent, "no 'mmc-slot' sub-node found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ return of_platform_device_create(slot_node, NULL, parent);
+}
+
+static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *slot_dev = mmc_dev(mmc);
+ int ret;
+
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
+ dev_err(slot_dev, "missing 'reg' property\n");
+ return -EINVAL;
+ }
+
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
+ dev_err(slot_dev, "invalid 'reg' property value %d\n",
+ host->slot_id);
+ return -EINVAL;
+ }
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ return ret;
+
+ mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count =
+ FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ 0xffffffff);
+ mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ 0xffffffff);
+ mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
+ mmc->max_blk_size /= BITS_PER_BYTE;
+
+ /* Get the min and max supported clock rates */
+ mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
+ mmc->f_max = clk_round_rate(host->cfg_div_clk,
+ clk_get_rate(host->parent_clk));
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->ops = &meson_mx_mmc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+{
+ struct clk_init_data init;
+ const char *clk_div_parent, *clk_fixed_factor_parent;
+
+ clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+ init.num_parents = 1;
+ host->fixed_factor.div = 2;
+ host->fixed_factor.mult = 1;
+ host->fixed_factor.hw.init = &init;
+
+ host->fixed_factor_clk = devm_clk_register(host->controller_dev,
+ &host->fixed_factor.hw);
+ if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
+ return PTR_ERR(host->fixed_factor_clk);
+
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+ init.num_parents = 1;
+ host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
+ host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->controller_dev,
+ &host->cfg_div.hw);
+ if (WARN_ON(IS_ERR(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ return 0;
+}
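meson_mx_mmc_register_clks() builds a two-stage clock tree: a fixed divide-by-2 off the "clkin" parent feeding a 10-bit clk_divider that lives in MESON_MX_SDIO_CONF[9:0]. Assuming the default clk_divider encoding (a register value of n divides by n + 1, with CLK_DIVIDER_ALLOW_ZERO letting 0 mean divide-by-1), the resulting MMC clock is roughly:

    mmc_clk = clkin / 2 / (CONF[9:0] + 1)

    e.g. an illustrative 1.275 GHz clkin:
        CONF[9:0] = 0     -> ~637.5 MHz upper bound
        CONF[9:0] = 1023  -> ~623 kHz lower bound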
+
+static int meson_mx_mmc_probe(struct platform_device *pdev)
+{
+ struct platform_device *slot_pdev;
+ struct mmc_host *mmc;
+ struct meson_mx_mmc_host *host;
+ struct resource *res;
+ int ret, irq;
+ u32 conf;
+
+ slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
+ if (!slot_pdev)
+ return -ENODEV;
+ else if (IS_ERR(slot_pdev))
+ return PTR_ERR(slot_pdev);
+
+ mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto error_unregister_slot_pdev;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->controller_dev = &pdev->dev;
+
+ spin_lock_init(&host->irq_lock);
+ timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
+
+ platform_set_drvdata(pdev, host);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(host->controller_dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto error_free_mmc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(host->controller_dev, irq,
+ meson_mx_mmc_irq,
+ meson_mx_mmc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto error_free_mmc;
+
+ host->core_clk = devm_clk_get(host->controller_dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto error_free_mmc;
+ }
+
+ host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
+ if (IS_ERR(host->parent_clk)) {
+ ret = PTR_ERR(host->parent_clk);
+ goto error_free_mmc;
+ }
+
+ ret = meson_mx_mmc_register_clks(host);
+ if (ret)
+ goto error_free_mmc;
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable core clock\n");
+ goto error_free_mmc;
+ }
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable MMC clock\n");
+ goto error_disable_core_clk;
+ }
+
+ conf = 0;
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
+ writel(conf, host->base + MESON_MX_SDIO_CONF);
+
+ meson_mx_mmc_soft_reset(host);
+
+ ret = meson_mx_mmc_add_host(host);
+ if (ret)
+ goto error_disable_clks;
+
+ return 0;
+
+error_disable_clks:
+ clk_disable_unprepare(host->cfg_div_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(host->core_clk);
+error_free_mmc:
+ mmc_free_host(mmc);
+error_unregister_slot_pdev:
+ of_platform_device_destroy(&slot_pdev->dev, NULL);
+ return ret;
+}
+
+static int meson_mx_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
+ struct device *slot_dev = mmc_dev(host->mmc);
+
+ del_timer_sync(&host->cmd_timeout);
+
+ mmc_remove_host(host->mmc);
+
+ of_platform_device_destroy(slot_dev, NULL);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+ { .compatible = "amlogic,meson8-sdio", },
+ { .compatible = "amlogic,meson8b-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+ .probe = meson_mx_mmc_probe,
+ .remove = meson_mx_mmc_remove,
+ .driver = {
+ .name = "meson-mx-sdio",
+ .of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1f54a818489..e8a1bb1ae694 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev,
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab08420..6457a7d8880f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -67,6 +67,7 @@
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
+#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define MSDC_DMA_SA 0x90
@@ -74,10 +75,14 @@
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
+#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
+#define EMMC50_CFG3 0x220
+#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
/* Register Mask */
@@ -95,6 +100,9 @@
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -183,6 +191,9 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+/* SDC_ADV_CFG0 mask */
+#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
@@ -212,11 +223,22 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
+#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
+
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
@@ -228,6 +250,11 @@
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+
+#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -290,9 +317,23 @@ struct msdc_save_para {
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
+ u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
+ u32 emmc50_cfg3;
+ u32 sdc_fifo_cfg;
+};
+
+struct mtk_mmc_compatible {
+ u8 clk_div_bits;
+ bool hs400_tune; /* only used for MT8173 */
+ u32 pad_tune_reg;
+ bool async_fifo;
+ bool data_tune;
+ bool busy_check;
+ bool stop_clk_fix;
+ bool enhance_rx;
};
struct msdc_tune_para {
@@ -309,6 +350,7 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
+ const struct mtk_mmc_compatible *dev_comp;
struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
@@ -334,11 +376,13 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
+ u32 latch_ck;
u32 hs400_ds_delay;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
@@ -350,6 +394,59 @@ struct msdc_host {
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2712_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
+
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
@@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
- sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD, &mode);
+ else
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA, &mode);
/* DDR mode will double the clk cycles for data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
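The timeout register works in units of 2^20 (1048576) sclk cycles, which is what the shift above converts to. A worked example with an illustrative 50 MHz card clock (clk_ns = 20) and a 100 ms timeout with no extra clocks:

    cycles = DIV_ROUND_UP(100000000 ns, 20 ns) + 0 = 5000000
    units  = DIV_ROUND_UP(5000000, 1048576)        = 5
    a DDR timing doubles this to 10; the register is programmed with the
    result minus one (9 for DDR, 4 for SDR).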
@@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
static void msdc_gate_clock(struct msdc_host *host)
{
+ clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
}
@@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->src_clk);
+ clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
}
@@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
- sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_clr_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
@@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
- sdr_set_bits(host->base + MSDC_CFG,
- MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
div = 0; /* div is ignored when bit 18 is set */
}
@@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sclk = (host->src_clk_freq >> 2) / div;
}
}
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ /*
+ * src_clk and HCLK share the same gate bit, so to gate only src_clk
+ * we have to gate its parent (the mux) instead.
+ */
+ if (host->src_clk_cg)
+ clk_disable_unprepare(host->src_clk_cg);
+ else
+ clk_disable_unprepare(clk_get_parent(host->src_clk));
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | div);
+ else
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+ (mode << 12) | div);
+ if (host->src_clk_cg)
+ clk_prepare_enable(host->src_clk_cg);
+ else
+ clk_prepare_enable(clk_get_parent(host->src_clk));
+
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mclk = hz;
host->timing = timing;
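The combined-field writes above pack the clock mode and divider into a single sdr_set_field() call; the mode shift differs because the mode bits sit directly above the divider field, whose width depends on the SoC. Restating the layout from the masks added earlier in this patch:

    8-bit SoCs:  CKDIV       = MSDC_CFG[15:8], CKMOD       = MSDC_CFG[17:16] -> (mode << 8)  | div
    12-bit SoCs: CKDIV_EXTRA = MSDC_CFG[19:8], CKMOD_EXTRA = MSDC_CFG[21:20] -> (mode << 12) | div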
@@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
*/
if (host->sclk <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
@@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + tune_reg);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+ if (host->dev_comp->stop_clk_fix) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_STOP_DLY, 3);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_RDVALIDSEL);
+ }
+
+ if (host->dev_comp->busy_check)
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+
+ if (host->dev_comp->async_fifo) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPWAIT, 3);
+ if (host->dev_comp->enhance_rx) {
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
+ } else {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPSTSENSEL, 2);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_CRCSTSENSEL, 2);
+ }
+ /* with the async FIFO there is no need to tune the internal delay */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGRESP);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGCRCSTS);
+ }
+
+ if (host->dev_comp->data_tune) {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ } else {
+ /* choose clock tune */
+ sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ }
+
/* Configure to enable SDIO mode.
* It is mandatory, otherwise SDIO CMD5 would fail.
*/
@@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1403,20 +1581,20 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
+ if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
@@ -1424,7 +1602,7 @@ skip_fall:
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
@@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1519,14 +1700,14 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
@@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->hs400_mode)
+ if (host->hs400_mode &&
+ host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
@@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
return ret;
}
@@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ /* hs400 mode must set it to 0 */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ /* to improve read performance, set outstanding to 2 */
+ sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
+
return 0;
}
@@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
+ &host->latch_ck);
+
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
@@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
+ const struct of_device_id *of_id;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
+
+ of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
@@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto host_free;
host->src_clk = devm_clk_get(&pdev->dev, "source");
@@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* the source clock control gate is an optional clock */
+ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
+ if (IS_ERR(host->src_clk_cg))
+ host->src_clk_cg = NULL;
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
@@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
+ host->dev_comp = of_id->data;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ if (host->dev_comp->clk_div_bits == 8)
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
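The minimum bus frequency follows from the widest divider setting: the slowest clock the divider path can produce is src_clk / 4 / div_max (see the sclk calculation earlier in this file), where div_max is 255 with the 8-bit layout and 4095 with the 12-bit one. With an illustrative 200 MHz source clock:

    8-bit:  DIV_ROUND_UP(200000000, 4 * 255)  -> ~196 kHz
    12-bit: DIV_ROUND_UP(200000000, 4 * 4095) -> ~12.2 kHz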
@@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+ host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
+ host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
}
static void msdc_restore_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+ writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
+ writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
}
static int msdc_runtime_suspend(struct device *dev)
@@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
-static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
-
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove = msdc_drv_remove,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 58d74b8d6c79..210247b3d11a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
return IRQ_NONE;
}
-static void mvsd_timeout_timer(unsigned long data)
+static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = (struct mvsd_host *)data;
+ struct mvsd_host *host = from_timer(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
}
- setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, mvsd_timeout_timer, 0);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 1d5418e4efae..5ff8ef7223cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static void mxcmci_watchdog(unsigned long data)
+static void mxcmci_watchdog(struct timer_list *t)
{
- struct mmc_host *mmc = (struct mmc_host *)data;
- struct mxcmci_host *host = mmc_priv(mmc);
+ struct mxcmci_host *host = from_timer(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev)
dat3_card_detect = true;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto out_free;
if (!mmc->ocr_avail) {
@@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_dma;
}
- init_timer(&host->watchdog);
- host->watchdog.function = &mxcmci_watchdog;
- host->watchdog.data = (unsigned long)mmc;
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
mmc_add_host(mmc);
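
The mxcmmc hunk above (and the omap_hsmmc hunk below) relaxes the regulator check from `ret == -EPROBE_DEFER` to `ret`. This appears to track the core behaviour where mmc_regulator_get_supply() treats absent vmmc/vqmmc supplies as non-fatal and only returns 0 or a probe-deferral error, so callers can simply propagate any non-zero value. A sketch of the resulting probe error path, with hypothetical names:

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

/* hypothetical probe fragment; not part of the patch */
static int my_mmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/*
	 * Absent vmmc/vqmmc supplies are not treated as errors by the core,
	 * so any non-zero return (e.g. -EPROBE_DEFER) can be propagated
	 * directly instead of being singled out.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto out_free;

	return 0;

out_free:
	mmc_free_host(mmc);
	return ret;
}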
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bd49f34d7654..adf32682f27a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work)
}
static void
-mmc_omap_cmd_timer(unsigned long data)
+mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
}
static void
-mmc_omap_clk_timer(unsigned long data)
+mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
tasklet_hi_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_timer(unsigned long arg)
+static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_seg_size = mmc->max_req_size;
if (slot->pdata->get_cover_state != NULL) {
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
+ timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
(unsigned long)slot);
}
@@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
- setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
- (unsigned long) host);
+ timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
spin_lock_init(&host->clk_lock);
- setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+ timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
spin_lock_init(&host->dma_lock);
spin_lock_init(&host->slot_lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 3b5e6d11069b..071693ebfe18 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -147,10 +147,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define VDD_1V8 1800000 /* 180000 uV */
-#define VDD_3V0 3000000 /* 300000 uV */
-#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -308,8 +304,7 @@ err_set_ocr:
return ret;
}
-static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
- int vdd)
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
@@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
if (power_on) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0) {
- dev_err(host->dev, "pbias set voltage fail\n");
- return ret;
- }
-
if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
@@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
- int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
@@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- ret = omap_hsmmc_set_pbias(host, false, 0);
+ ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
@@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (ret)
return ret;
- ret = omap_hsmmc_set_pbias(host, true, vdd);
+ ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
@@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/* Allow an aux regulator */
@@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host, 1, vdd);
+ ret = omap_hsmmc_set_power(host, 1);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host, 0, 0);
+ omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h
index f6c2e2fcce37..d301ca18c5d4 100644
--- a/drivers/mmc/host/pxamci.h
+++ b/drivers/mmc/host/pxamci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define MMC_STRPCL 0x0000
#define STOP_CLOCK (1 << 0)
#define START_CLOCK (2 << 0)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f905f2361d12..41cbe84c1d18 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
@@ -146,11 +147,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
WARN_ON(host->sg_len > 1);
/* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg->offset, 8)) {
- host->force_pio = true;
- renesas_sdhi_internal_dmac_enable_dma(host, false);
- return;
- }
+ if (!IS_ALIGNED(sg->offset, 8))
+ goto force_pio;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +161,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
}
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
- if (ret < 0)
- return;
+ if (ret == 0)
+ goto force_pio;
renesas_sdhi_internal_dmac_enable_dma(host, true);
@@ -176,6 +174,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
dtran_mode);
renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
sg->dma_address);
+
+ return;
+
+force_pio:
+ host->force_pio = true;
+ renesas_sdhi_internal_dmac_enable_dma(host, false);
}
static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
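
The check rewritten above matters because dma_map_sg() reports failure by returning 0 mapped entries, never a negative value, so the old `ret < 0` test could never fire; the new force_pio label also keeps the PIO fallback in one place. A small illustrative sketch of that idiom; the types and helpers below are made up, not the driver's:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* illustrative host structure and helper; not the driver's own */
struct my_host {
	struct device *dev;
	struct scatterlist *sg;
	unsigned int sg_len;
	bool force_pio;
};

static void my_enable_dma(struct my_host *host, bool enable)
{
	/* hardware-specific DMA enable/disable would go here */
}

static void my_start_dma(struct my_host *host, enum dma_data_direction dir)
{
	int nents;

	/* dma_map_sg() returns the number of mapped entries; 0 means failure */
	nents = dma_map_sg(host->dev, host->sg, host->sg_len, dir);
	if (nents == 0)
		goto force_pio;

	my_enable_dma(host, true);
	return;

force_pio:
	host->force_pio = true;
	my_enable_dma(host, false);
}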
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index df4465439e13..9ab10436e4b8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
@@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 41b57713b620..0848dc0f882e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
- int err;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPRX_CTL,
+ PHASE_SELECT_MASK, sample_point);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPTX_CTL, 0x1F, sample_point);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, SD_VPTX_CTL,
+ PHASE_SELECT_MASK, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
return 0;
}
@@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
{
int err;
struct mmc_command cmd = {};
+ struct rtsx_pcr *pcr = host->pcr;
- err = sd_change_phase(host, sample_point, true);
- if (err < 0)
- return err;
+ sd_change_phase(host, sample_point, true);
+
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
cmd.opcode = opcode;
err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
@@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
sd_clear_error(host);
+ rtsx_pci_write_register(pcr, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
return err;
}
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 08ae0ff13513..b988997a1e80 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -73,6 +73,7 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
};
@@ -82,13 +83,118 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
+ unsigned long private[0] ____cacheline_aligned;
};
+static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+{
+ return (void *)c->private;
+}
+
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
+ INTEL_DSM_V33_SWITCH = 4,
+};
+
+struct intel_host {
+ u32 dsm_fns;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+ 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *result = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
+ size_t len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+ } else {
+ dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
+ __func__, fn, obj->type, obj->buffer.length);
+ err = -EINVAL;
+ }
+
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ struct mmc_host *mmc)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ if (err) {
+ pr_debug("%s: DSM not supported, error %d\n",
+ mmc_hostname(mmc), err);
+ return;
+ }
+
+ pr_debug("%s: DSM function mask %#x\n",
+ mmc_hostname(mmc), intel_host->dsm_fns);
+}
+
+static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ unsigned int fn;
+ u32 result = 0;
+ int err;
+
+ err = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (err)
+ return err;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ fn = INTEL_DSM_V33_SWITCH;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ fn = INTEL_DSM_V18_SWITCH;
+ break;
+ default:
+ return 0;
+ }
+
+ err = intel_dsm(intel_host, dev, fn, &result);
+ pr_debug("%s: %s DSM fn %u error %d result %u\n",
+ mmc_hostname(mmc), __func__, fn, err, result);
+
+ return 0;
+}
+
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -269,56 +375,26 @@ out:
return ret;
}
-static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during emmc probe slot goes here */
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ struct sdhci_host *host = c->host;
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- return 0;
-}
-
-static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
-
- if (!c || !c->host)
- return 0;
-
- /* Platform specific code during sdio probe slot goes here */
-
- return 0;
-}
-
-static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host || !c->slot)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during sd probe slot goes here */
-
if (hid && !strcmp(hid, "80865ACA"))
host->mmc_host_ops.get_cd = bxt_get_cd;
+ intel_dsm_init(intel_host, &pdev->dev, host->mmc);
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ intel_start_signal_voltage_switch;
+
return 0;
}
@@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
- .probe_slot = sdhci_acpi_emmc_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
- .probe_slot = sdhci_acpi_sdio_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
@@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
- .probe_slot = sdhci_acpi_sd_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
@@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct sdhci_acpi_slot *slot;
struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
+ size_t priv_size;
const char *hid;
const char *uid;
int err;
@@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
+ uid = acpi_device_uid(device);
+
+ slot = sdhci_acpi_get_slot(hid, uid);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
@@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ priv_size = slot ? slot->priv_size : 0;
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
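
The Intel voltage-switch support added above rests on the usual ACPI _DSM convention: function 0 returns a bitmap of supported functions, and later calls are gated on that bitmap before acpi_evaluate_dsm() is invoked again. A reduced sketch of that call sequence; the GUID below is a placeholder, not Intel's SDHCI GUID:

#include <linux/acpi.h>

/* placeholder GUID for illustration only */
static const guid_t my_dsm_guid =
	GUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc);

static int my_dsm(struct device *dev, unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &my_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type == ACPI_TYPE_INTEGER)
		*result = obj->integer.value;
	else
		err = -EINVAL;

	ACPI_FREE(obj);
	return err;
}

/* function 0 conventionally reports a bitmap of supported _DSM functions */
static bool my_dsm_fn_supported(struct device *dev, unsigned int fn)
{
	u32 fns = 0;

	if (my_dsm(dev, 0, &fns))
		return false;

	return fn <= 31 && (fns & (1U << fn));
}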
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 56529c3d389a..0f589e26ee63 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -27,15 +28,14 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
-#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
-#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
-#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
-#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
@@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
- tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
- (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
@@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
- tmp |= mode;
+ tmp &= ~SDHCI_CDNS_HRS06_MODE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
@@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
- if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
- tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
- tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp &= ~SDHCI_CDNS_HRS06_TUNE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
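
The sdhci-cadence hunks replace hand-rolled shift-and-mask arithmetic with the <linux/bitfield.h> helpers: the GENMASK() alone encodes the field position, and FIELD_PREP()/FIELD_GET()/FIELD_FIT() derive the shift from it. A self-contained sketch of the idiom with a made-up register layout:

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define MY_REG_MODE	GENMASK(2, 0)	/* bits 2:0 */
#define MY_REG_TUNE	GENMASK(13, 8)	/* bits 13:8 */

static u32 my_reg_set_tune(u32 reg, u32 val)
{
	/* FIELD_FIT() checks that val fits in the field before writing it */
	if (!FIELD_FIT(MY_REG_TUNE, val))
		return reg;

	reg &= ~MY_REG_TUNE;
	reg |= FIELD_PREP(MY_REG_TUNE, val);	/* shift val into place */
	return reg;
}

static u32 my_reg_get_mode(u32 reg)
{
	return FIELD_GET(MY_REG_MODE, reg);	/* extract and shift down */
}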
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fc73e56eb1e2..3fb7d2eec93f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -123,14 +123,17 @@
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -138,6 +141,10 @@ struct sdhci_msm_host {
bool calibration_done;
u8 saved_tuning_phase;
bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
};
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(msm_host->clk, clock);
+ rc = clk_set_rate(core_clk, clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
}
msm_host->clk_rate = clock;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
curr_ios.timing);
}
@@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_msm_hs400(host, &mmc->ios);
}
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * sdhci_msm_check_power_status API should be called when register writes
+ * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
+ * happen. The state to which the register writes will change the IO lines
+ * should be passed as the argument req_type. This API checks whether the
+ * IO line's state is already the expected state and waits for the power
+ * irq only if a power irq is expected to be triggered based on the current
+ * and expected IO line states.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The IRQ for request type IO High/LOW will be generated when -
+ * there is a state change in 1.8V enable bit (bit 3) of
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
+ * which indicates 3.3V IO voltage. So, when MMC core layer tries
+ * to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where register writes will
+ * not change the current bus state or io level of the controller.
+ * In this case, no power irq will be triggered and we should
+ * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ int pwr_state = 0, io_level = 0;
+
irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
irq_status &= INT_MASK;
writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+ * lost when the actual reset and the clear/read of the status register
+ * happen at the same time. Hence, retry at least 10 times to make
+ * sure the status register is cleared. Otherwise, this will result in
+ * a spurious power IRQ resulting in system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+ pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+ /* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
/*
* The driver has to acknowledge the interrupt, switch voltages and
@@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host)
* switches are handled by the sdhci core, so just report success.
*/
writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
- sdhci_msm_voltage_switch(host);
return IRQ_HANDLED;
}
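
The wait-queue plumbing introduced in this hunk is a plain flag-plus-wait_event_timeout() handshake: the register-write path clears the flag and sleeps, and the power irq handler sets it and wakes the waiter. A stripped-down sketch of that handshake, with hypothetical names and the waitqueue assumed to be initialised in probe via init_waitqueue_head():

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/* hypothetical host structure; pwr_irq_wait is initialised in probe */
struct my_host {
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
};

/* writer side: clear the flag, perform the register write, then sleep */
static void my_wait_for_pwr_irq(struct my_host *host, unsigned int timeout_ms)
{
	host->pwr_irq_flag = false;
	/* ... register write expected to raise the power irq goes here ... */
	if (!wait_event_timeout(host->pwr_irq_wait, host->pwr_irq_flag,
				msecs_to_jiffies(timeout_ms)))
		pr_warn("power irq timed out\n");
}

/* irq side: record completion and wake the waiter */
static irqreturn_t my_pwr_irq(int irq, void *data)
{
	struct my_host *host = data;

	host->pwr_irq_flag = true;
	wake_up(&host->pwr_irq_wait);
	return IRQ_HANDLED;
}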
@@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
- return clk_round_rate(msm_host->clk, ULONG_MAX);
+ return clk_round_rate(core_clk, ULONG_MAX);
}
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
@@ -1092,6 +1236,69 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*
+ * Platform specific register write functions. If any register write needs
+ * to be followed up by platform specific actions, they can be added here.
+ * These functions can go to sleep when writes to certain registers are
+ * done.
+ * These functions rely on sdhci_set_ios not taking a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = {
.get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
struct resource *core_memres;
+ struct clk *clk;
int ret;
u16 host_version, core_minor;
u32 core_version, config;
@@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ msm_host->bulk_clks[1].clk = clk;
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_host->clk)) {
- ret = PTR_ERR(msm_host->clk);
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
- goto pclk_disable;
+ goto bus_clk_disable;
}
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
- ret = clk_prepare_enable(msm_host->clk);
- if (ret)
- goto pclk_disable;
-
core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
@@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /*
+ * Power on reset state may trigger power irq if previous status of
+ * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
+ * interrupt in GIC, any pending power irq interrupt should be
+ * acknowledged. Otherwise power irq interrupt handler would be
+ * fired prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+ * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
@@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
@@ -1290,9 +1527,8 @@ pm_runtime_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
-pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
@@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
return 0;
}
@@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = clk_prepare_enable(msm_host->clk);
- if (ret) {
- dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret) {
- dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
- clk_disable_unprepare(msm_host->clk);
- return ret;
- }
-
- return 0;
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
}
#endif
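
The sdhci-msm clock handling above gathers the core, iface, cal and sleep clocks into a clk_bulk_data array so that enabling, error unwinding and runtime PM collapse into single clk_bulk_* calls; optional clocks whose devm_clk_get() fails are stored as NULL, which the clk API treats as a no-op. A minimal sketch of that arrangement; the clock names are examples only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct my_host {
	struct clk_bulk_data bulk_clks[2];	/* core, iface */
};

static int my_clks_get_enable(struct device *dev, struct my_host *host)
{
	struct clk *clk;

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	host->bulk_clks[0].clk = clk;

	clk = devm_clk_get(dev, "iface");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	host->bulk_clks[1].clk = clk;

	/* one call prepares and enables the whole array, unwinding on error */
	return clk_bulk_prepare_enable(ARRAY_SIZE(host->bulk_clks),
				       host->bulk_clks);
}

static void my_clks_disable(struct my_host *host)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(host->bulk_clks),
				   host->bulk_clks);
}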
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 4e47ed6bc716..682c573e20a7 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
{
if (timing == MMC_TIMING_MMC_DDR52)
sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d96a057a7db8..1f424374bbbb 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
return clock / 256 / 16;
}
+static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+{
+ u32 val;
+ ktime_t timeout;
+
+ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+
+ if (enable)
+ val |= ESDHC_CLOCK_SDCLKEN;
+ else
+ val &= ~ESDHC_CLOCK_SDCLKEN;
+
+ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ udelay(10);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- if (clock == 0)
+ if (clock == 0) {
+ esdhc_clock_enable(host, false);
return;
+ }
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
if (esdhc->vendor_ver < VENDOR_V_23)
@@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
-static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
-{
- u32 val;
- ktime_t timeout;
-
- val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-
- if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
- else
- val &= ~ESDHC_CLOCK_SDCLKEN;
-
- sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- break;
- }
- udelay(10);
- }
-}
-
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
new file mode 100644
index 000000000000..628bfe9a3d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -0,0 +1,607 @@
+/**
+ * SDHCI Controller driver for TI's OMAP SoCs
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDHCI_OMAP_CON 0x12c
+#define CON_DW8 BIT(5)
+#define CON_DMA_MASTER BIT(20)
+#define CON_INIT BIT(1)
+#define CON_OD BIT(0)
+
+#define SDHCI_OMAP_CMD 0x20c
+
+#define SDHCI_OMAP_HCTL 0x228
+#define HCTL_SDBP BIT(8)
+#define HCTL_SDVS_SHIFT 9
+#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
+
+#define SDHCI_OMAP_SYSCTL 0x22c
+#define SYSCTL_CEN BIT(2)
+#define SYSCTL_CLKD_SHIFT 6
+#define SYSCTL_CLKD_MASK 0x3ff
+
+#define SDHCI_OMAP_STAT 0x230
+
+#define SDHCI_OMAP_IE 0x234
+#define INT_CC_EN BIT(0)
+
+#define SDHCI_OMAP_AC12 0x23c
+#define AC12_V1V8_SIGEN BIT(19)
+
+#define SDHCI_OMAP_CAPA 0x240
+#define CAPA_VS33 BIT(24)
+#define CAPA_VS30 BIT(25)
+#define CAPA_VS18 BIT(26)
+
+#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
+
+#define SYSCTL_CLKD_MAX 0x3FF
+
+#define IOV_1V8 1800000 /* 1800000 uV */
+#define IOV_3V0 3000000 /* 3000000 uV */
+#define IOV_3V3 3300000 /* 3300000 uV */
+
+struct sdhci_omap_data {
+ u32 offset;
+};
+
+struct sdhci_omap_host {
+ void __iomem *base;
+ struct device *dev;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ struct sdhci_host *host;
+ u8 bus_mode;
+ u8 power_mode;
+};
+
+static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
+ unsigned int offset)
+{
+ return readl(host->base + offset);
+}
+
+static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
+ unsigned int offset, u32 data)
+{
+ writel(data, host->base + offset);
+}
+
+static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
+ bool power_on, unsigned int iov)
+{
+ int ret;
+ struct device *dev = omap_host->dev;
+
+ if (IS_ERR(omap_host->pbias))
+ return 0;
+
+ if (power_on) {
+ ret = regulator_set_voltage(omap_host->pbias, iov, iov);
+ if (ret) {
+ dev_err(dev, "pbias set voltage failed\n");
+ return ret;
+ }
+
+ if (omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_enable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg enable fail\n");
+ return ret;
+ }
+
+ omap_host->pbias_enabled = true;
+ } else {
+ if (!omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_disable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ omap_host->pbias_enabled = false;
+ }
+
+ return 0;
+}
+
+static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
+ unsigned int iov)
+{
+ int ret;
+ struct sdhci_host *host = omap_host->host;
+ struct mmc_host *mmc = host->mmc;
+
+ ret = sdhci_omap_set_pbias(omap_host, false, 0);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
+ return ret;
+ }
+ }
+
+ ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+ unsigned char signal_voltage)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ reg &= ~HCTL_SDVS_MASK;
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ reg |= HCTL_SDVS_33;
+ else
+ reg |= HCTL_SDVS_18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ reg |= HCTL_SDBP;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+}
+
+static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+ int ret;
+ unsigned int iov;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct device *dev;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ dev = omap_host->dev;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS33))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg &= ~AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_3V3;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS18))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg |= AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_1V8;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = sdhci_omap_enable_iov(omap_host, iov);
+ if (ret) {
+ dev_err(dev, "failed to switch IO voltage to %dmV\n", iov);
+ return ret;
+ }
+
+ dev_dbg(dev, "IO voltage switched to %dmV\n", iov);
+ return 0;
+}
+
+static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
+ unsigned int mode)
+{
+ u32 reg;
+
+ if (omap_host->bus_mode == mode)
+ return;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (mode == MMC_BUSMODE_OPENDRAIN)
+ reg |= CON_OD;
+ else
+ reg &= ~CON_OD;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ omap_host->bus_mode = mode;
+}
+
+static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
+ sdhci_set_ios(mmc, ios);
+}
+
+static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
+ unsigned int clock)
+{
+ u16 dsor;
+
+ dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
+ if (dsor > SYSCTL_CLKD_MAX)
+ dsor = SYSCTL_CLKD_MAX;
+
+ return dsor;
+}
+
+static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg |= SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg &= ~SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long clkdiv;
+
+ sdhci_omap_stop_clock(omap_host);
+
+ if (!clock)
+ return;
+
+ clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
+ clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
+ sdhci_enable_clk(host, clkdiv);
+
+ sdhci_omap_start_clock(omap_host);
+}
+
+static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+static int sdhci_omap_enable_dma(struct sdhci_host *host)
+{
+ u32 reg;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_DMA_MASTER;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ return 0;
+}
+
+static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
+}
+
+static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (width == MMC_BUS_WIDTH_8)
+ reg |= CON_DW8;
+ else
+ reg &= ~CON_DW8;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ sdhci_set_bus_width(host, width);
+}
+
+static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ u32 reg;
+ ktime_t timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->power_mode == power_mode)
+ return;
+
+ if (power_mode != MMC_POWER_ON)
+ return;
+
+ disable_irq(host->irq);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg &= ~CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
+
+ enable_irq(host->irq);
+
+ omap_host->power_mode = power_mode;
+}
+
+static struct sdhci_ops sdhci_omap_ops = {
+ .set_clock = sdhci_omap_set_clock,
+ .set_power = sdhci_omap_set_power,
+ .enable_dma = sdhci_omap_enable_dma,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = sdhci_omap_get_min_clock,
+ .set_bus_width = sdhci_omap_set_bus_width,
+ .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+ int ret = 0;
+ struct device *dev = omap_host->dev;
+ struct regulator *vqmmc;
+
+ vqmmc = regulator_get(dev, "vqmmc");
+ if (IS_ERR(vqmmc)) {
+ ret = PTR_ERR(vqmmc);
+ goto reg_put;
+ }
+
+ /* voltage capabilities might be set by the boot loader; clear them */
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
+
+ if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
+ reg |= CAPA_VS33;
+ if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ reg |= CAPA_VS18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
+
+reg_put:
+ regulator_put(vqmmc);
+
+ return ret;
+}
+
+static const struct sdhci_pltfm_data sdhci_omap_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V |
+ SDHCI_QUIRK2_ACMD23_BROKEN |
+ SDHCI_QUIRK2_RSP_136_HAS_CRC,
+ .ops = &sdhci_omap_ops,
+};
+
+static const struct sdhci_omap_data dra7_data = {
+ .offset = 0x200,
+};
+
+static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sdhci_match);
+
+static int sdhci_omap_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 offset;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct mmc_host *mmc;
+ const struct of_device_id *match;
+ struct sdhci_omap_data *data;
+
+ match = of_match_device(omap_sdhci_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct sdhci_omap_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no sdhci omap data\n");
+ return -EINVAL;
+ }
+ offset = data->offset;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
+ sizeof(*omap_host));
+ if (IS_ERR(host)) {
+ dev_err(dev, "Failed sdhci_pltfm_init\n");
+ return PTR_ERR(host);
+ }
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ omap_host->host = host;
+ omap_host->base = host->ioaddr;
+ omap_host->dev = dev;
+ host->ioaddr += offset;
+
+ mmc = host->mmc;
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ pltfm_host->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err_pltfm_free;
+ }
+
+ ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
+ if (ret) {
+ dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
+ goto err_pltfm_free;
+ }
+
+ omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
+ if (IS_ERR(omap_host->pbias)) {
+ ret = PTR_ERR(omap_host->pbias);
+ if (ret != -ENODEV)
+ goto err_pltfm_free;
+ dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
+ }
+ omap_host->pbias_enabled = false;
+
+ /*
+ * omap_device_pm_domain has callbacks to enable the main
+ * functional clock, interface clock and also configure the
+ * SYSCONFIG register of omap devices. The callback will be invoked
+ * as part of pm_runtime_get_sync.
+ */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(dev);
+ goto err_rpm_disable;
+ }
+
+ ret = sdhci_omap_set_capabilities(omap_host);
+ if (ret) {
+ dev_err(dev, "failed to set system capabilities\n");
+ goto err_put_sync;
+ }
+
+ host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_omap_start_signal_voltage_switch;
+ host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
+
+ sdhci_read_caps(host);
+ host->caps |= SDHCI_CAN_DO_ADMA2;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_put_sync;
+
+ return 0;
+
+err_put_sync:
+ pm_runtime_put_sync(dev);
+
+err_rpm_disable:
+ pm_runtime_disable(dev);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, true);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_omap_driver = {
+ .probe = sdhci_omap_probe,
+ .remove = sdhci_omap_remove,
+ .driver = {
+ .name = "sdhci-omap",
+ .of_match_table = omap_sdhci_match,
+ },
+};
+
+module_platform_driver(sdhci_omap_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci_omap");
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index d0ccc6729fd2..3e4f04fd5175 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,7 +32,6 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -448,6 +447,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
int err;
u32 val;
+ intel_host->d3_retune = true;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -796,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
-/* O2Micro extra registers */
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1288,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 14273ca00641..555970a29c94 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -19,7 +19,40 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
deleted file mode 100644
index 770f53857211..000000000000
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2013 BayHub Technology Ltd.
- *
- * Authors: Peter Guo <peter.guo@bayhubtech.com>
- * Adam Lee <adam.lee@canonical.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __SDHCI_PCI_O2MICRO_H
-#define __SDHCI_PCI_O2MICRO_H
-
-#include "sdhci-pci.h"
-
-/*
- * O2Micro device IDs
- */
-
-#define PCI_DEVICE_ID_O2_SDS0 0x8420
-#define PCI_DEVICE_ID_O2_SDS1 0x8421
-#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
-#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
-#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
-
-/*
- * O2Micro device registers
- */
-
-#define O2_SD_MISC_REG5 0x64
-#define O2_SD_LD0_CTRL 0x68
-#define O2_SD_DEV_CTRL 0x88
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_TEST_REG 0xD4
-#define O2_SD_FUNC_REG0 0xDC
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-#define O2_SD_MISC_CTRL4 0xFC
-#define O2_SD_TUNING_CTRL 0x300
-#define O2_SD_PLL_SETTING 0x304
-#define O2_SD_CLK_SETTING 0x328
-#define O2_SD_CAP_REG2 0x330
-#define O2_SD_CAP_REG0 0x334
-#define O2_SD_UHS1_CAP_SETTING 0x33C
-#define O2_SD_DELAY_CTRL 0x350
-#define O2_SD_UHS2_L1_CTRL 0x35C
-#define O2_SD_FUNC_REG3 0x3E0
-#define O2_SD_FUNC_REG4 0x3E4
-#define O2_SD_LED_ENABLE BIT(6)
-#define O2_SD_FREG0_LEDOFF BIT(13)
-#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
-
-#define O2_SD_VENDOR_SETTING 0x110
-#define O2_SD_VENDOR_SETTING2 0x1C8
-
-extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-
-extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-
-extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-
-#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 75196a2b5289..0056f08a29cc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SDHCI_PCI_H
#define __SDHCI_PCI_H
@@ -5,6 +6,12 @@
* PCI device IDs, sub IDs
*/
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
@@ -25,6 +32,7 @@
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
@@ -163,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+#endif
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d328fcf284d1..cda83ccb2702 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
NULL)
};
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
-static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .no_divider = true,
-};
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
-#else
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
-#endif
-
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
- }, {
- .name = "exynos4-sdhci",
- .driver_data = EXYNOS4_SDHCI_DRV_DATA,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
- .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ .data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
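Storing a direct pointer in the OF match table's .data field, as the hunk above now does, lets probe code fetch the per-compatible data without casting through kernel_ulong_t. A generic sketch of that lookup, with hypothetical names and not necessarily the helper sdhci-s3c itself uses:

#include <linux/of_device.h>

/* Illustrative stand-in for the driver-private data attached via .data. */
struct example_drv_data {
	bool no_divider;
};

static const struct example_drv_data *example_get_drv_data(struct device *dev)
{
	/* Returns the .data pointer of the matching of_device_id, or NULL. */
	return of_device_get_match_data(dev);
}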
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0cd6fa80db66..b877c13184c2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ /*
+ * SDHCI controllers on Tegra186 support 40-bit addressing, while
+ * IOVA addresses on Tegra186 are 48 bits wide. A 64-bit DMA mask
+ * can therefore produce broken accesses, so disable 64-bit DMA and
+ * fall back to a 32-bit DMA mask. Ideally a 40-bit DMA mask would
+ * be used, but that is not supported yet.
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.ops = &tegra114_sdhci_ops,
};
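With SDHCI_QUIRK2_BROKEN_64_BIT_DMA set, the SDHCI core stops advertising 64-bit DMA and ends up with a 32-bit DMA mask. Conceptually, the fallback amounts to the sketch below; the core performs the mask setup itself, and the helper name here is illustrative only.

#include <linux/dma-mapping.h>

/* Limit both streaming and coherent DMA to 32-bit addresses. */
static int example_limit_dma_to_32bit(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}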
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0d5fcca18c9e..2f14334e42df 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param)
;
}
-static void sdhci_timeout_timer(unsigned long data)
+static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = from_timer(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_timeout_data_timer(unsigned long data)
+static void sdhci_timeout_data_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host *)data;
+ host = from_timer(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* available.
*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
DBG("Version: 0x%08x | Present: 0x%08x\n",
@@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host)
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
- setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- setup_timer(&host->data_timer, sdhci_timeout_data_timer,
- (unsigned long)host);
+ timer_setup(&host->timer, sdhci_timeout_timer, 0);
+ timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
init_waitqueue_head(&host->buf_ready_int);
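The timer conversion in the sdhci.c hunk above follows the generic timer_list API migration and recurs in the tifm_sd, via-sdmmc, vub300 and wbsd hunks further down: the callback now receives the timer itself and recovers its container with from_timer(), while timer_setup() replaces setup_timer() plus the .data assignment. A standalone sketch with hypothetical struct and field names:

#include <linux/timer.h>

struct example_host {
	struct timer_list timer;	/* hypothetical embedded timer */
};

/* The callback gets the timer; recover the container with from_timer(). */
static void example_timeout(struct timer_list *t)
{
	struct example_host *host = from_timer(host, t, timer);

	/* handle the timeout for 'host' here */
	(void)host;
}

static void example_init(struct example_host *host)
{
	/* Replaces setup_timer(&host->timer, fn, (unsigned long)host). */
	timer_setup(&host->timer, example_timeout, 0);
}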
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 111b66f5439b..04ca0d33a521 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include "sdhci-pltfm.h"
@@ -47,6 +48,7 @@ struct f_sdhost_priv {
struct clk *clk;
u32 vendor_hs200;
struct device *dev;
+ bool enable_cmd_dat_delay;
};
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
@@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
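The new reset behaviour above is driven by an optional boolean firmware property. A minimal sketch of that probe step, with an illustrative wrapper name around the property string shown in the hunk:

#include <linux/property.h>

/* True when the firmware node carries "fujitsu,cmd-dat-delay-select". */
static bool example_cmd_dat_delay_requested(struct device *dev)
{
	return device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select");
}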
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 53c970fe0873..cc98355dbdb9 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ if (ret)
return ret;
- }
host->reg_base = devm_ioremap_resource(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
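This hunk, like the mmc_regulator_get_supply() changes in the sdhci.c, tmio and usdhi6rol0 hunks, drops the -EPROBE_DEFER special case: at the time of this series the helper returns either 0 or -EPROBE_DEFER, so propagating any non-zero value is equivalent and stays correct if new error codes appear later. A minimal sketch of the resulting calling convention (function name is illustrative):

#include <linux/mmc/host.h>

static int example_get_supplies(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)		/* currently only -EPROBE_DEFER */
		return ret;

	/* vmmc/vqmmc, when present, are now populated in mmc->supply. */
	return 0;
}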
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 93c4b40df90a..a3d8380ab480 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data)
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(unsigned long data)
+static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
- setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a7293e186e03..583bf3262df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -47,6 +47,7 @@
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/workqueue.h>
#include "tmio_mmc.h"
@@ -166,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ usleep_range(10000, 11000);
}
}
@@ -178,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -186,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
@@ -218,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
tmio_mmc_clk_start(host);
}
@@ -229,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
@@ -1112,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
+ int err;
- mmc_regulator_get_supply(mmc);
+ err = mmc_regulator_get_supply(mmc);
+ if (err)
+ return err;
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
@@ -1215,6 +1219,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->max_blk_count = pdata->max_blk_count ? :
(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ /*
+ * swiotlb has a memory size limitation, but there is no API to
+ * query it yet. Calculate the maximum bounce-buffer size locally
+ * and, as a workaround, cap max_req_size to that value when it
+ * would otherwise exceed it.
+ */
+ if (swiotlb_max_segment()) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
mmc->max_seg_size = mmc->max_req_size;
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
@@ -1286,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
pm_runtime_enable(&pdev->dev);
ret = mmc_add_host(mmc);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
+
mmc_gpiod_request_cd_irq(mmc);
}
return 0;
+
+remove_host:
+ tmio_mmc_host_remove(_host);
+ return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
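The tmio hunk above caps max_req_size to the largest buffer swiotlb can bounce, computed from the swiotlb slab constants. A standalone sketch of that clamp, with an illustrative helper name:

#include <linux/swiotlb.h>

/*
 * The largest request swiotlb can bounce is IO_TLB_SEGSIZE slabs of
 * (1 << IO_TLB_SHIFT) bytes; clamp when swiotlb may be in use.
 */
static unsigned int example_cap_req_size(unsigned int max_req_size)
{
	unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

	if (swiotlb_max_segment() && max_req_size > max_size)
		max_req_size = max_size;

	return max_req_size;
}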
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 64da6a88cfb9..cdfeb15b6f05 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto e_free_mmc;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a838bf5480d8..32c4211506fc 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -932,12 +932,12 @@ out:
return result;
}
-static void via_sdc_timeout(unsigned long ulongdata)
+static void via_sdc_timeout(struct timer_list *t)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = (struct via_crdr_mmc_host *)ulongdata;
+ sdhost = from_timer(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
u32 lenreg;
u32 status;
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = via_sdc_timeout;
+ timer_setup(&host->timer, via_sdc_timeout, 0);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 8f569d257405..1fe68137a30f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_inactivity_timer_expired(unsigned long data)
+static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300)
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
-static void vub300_sg_timed_out(unsigned long data)
+static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
sizeof(vub300->vub_name));
return;
}
+
+ return;
+
+copy_error_message:
+ strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
}
/*
@@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface,
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
- init_timer(&vub300->sg_transfer_timer);
- vub300->sg_transfer_timer.data = (unsigned long)vub300;
- vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
- init_timer(&vub300->inactivity_timer);
- vub300->inactivity_timer.data = (unsigned long)vub300;
- vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ timer_setup(&vub300->inactivity_timer,
+ vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 546aaf8d1507..f4233576153b 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = {
* Helper function to reset detection ignore
*/
-static void wbsd_reset_ignore(unsigned long data)
+static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = (struct wbsd_host *)data;
+ struct wbsd_host *host = from_timer(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Set up timers
*/
- init_timer(&host->ignore_timer);
- host->ignore_timer.data = (unsigned long)host;
- host->ignore_timer.function = wbsd_reset_ignore;
+ timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0);
/*
* Maximum number of segments. Worst case is one sector per segment