author     Jason Gunthorpe <jgg@nvidia.com>    2022-11-29 16:42:27 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>    2022-12-02 12:04:39 -0400
commit     90337f526c98129b0b180fc52dc5f57d8e7a8614 (patch)
tree       d7458a03027cde56b8191f656dc5cdf1531a767d /drivers/dma/idxd
parent     169dd5c987e60e62aa5785b30d22ded2ae000286 (diff)
parent     b7b275e60bcd5f89771e865a8239325f86d9927d (diff)
Merge tag 'v6.1-rc7' into iommufd.git for-next
Resolve conflicts in drivers/vfio/vfio_main.c by using the iommufd version.
The rc fix was done a different way when the iommufd patches reworked this code.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/dma/idxd')
-rw-r--r--  drivers/dma/idxd/cdev.c    | 18
-rw-r--r--  drivers/dma/idxd/device.c  | 26
-rw-r--r--  drivers/dma/idxd/idxd.h    | 32
-rw-r--r--  drivers/dma/idxd/init.c    |  4
-rw-r--r--  drivers/dma/idxd/sysfs.c   |  2
5 files changed, 70 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 66720001ba1c..e13e92609943 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -311,6 +311,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
 	if (idxd->state != IDXD_DEV_ENABLED)
 		return -ENXIO;
 
+	/*
+	 * User type WQ is enabled only when SVA is enabled for two reasons:
+	 *   - If no IOMMU or IOMMU Passthrough without SVA, userspace
+	 *     can directly access physical address through the WQ.
+	 *   - The IDXD cdev driver does not provide any ways to pin
+	 *     user pages and translate the address from user VA to IOVA or
+	 *     PA without IOMMU SVA. Therefore the application has no way
+	 *     to instruct the device to perform DMA function. This makes
+	 *     the cdev not usable for normal application usage.
+	 */
+	if (!device_user_pasid_enabled(idxd)) {
+		idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
+		dev_dbg(&idxd->pdev->dev,
+			"User type WQ cannot be enabled without SVA.\n");
+
+		return -EOPNOTSUPP;
+	}
+
 	mutex_lock(&wq->wq_lock);
 	wq->type = IDXD_WQT_USER;
 	rc = drv_enable_wq(wq);
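
The gate added above relies on device_user_pasid_enabled(), which lives in idxd.h rather than in this diff. Going by the driver source around this release, it is essentially a flag test, with the flag set at probe time only when IOMMU SVA/PASID support could be enabled; roughly:

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{
	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
}

When the flag is clear, idxd_user_drv_probe() now refuses to bind the user (cdev) driver to the WQ and records IDXD_SCMD_WQ_USER_NO_IOMMU in cmd_status, so the reason for the failed bind can be inspected afterwards.
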
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 2c1e6f6daa62..6f44fa8f78a5 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 	memset(wq->name, 0, WQ_NAME_SIZE);
 	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-	wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
 	if (wq->opcap_bmap)
 		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 }
@@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
-	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-		return;
+	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+		/*
+		 * Clearing wq state is protected by wq lock.
+		 * So no need to be protected by device lock.
+		 */
+		idxd_device_wqs_clear_state(idxd);
+
+		spin_lock(&idxd->dev_lock);
+		idxd_groups_clear_state(idxd);
+		idxd_engines_clear_state(idxd);
+	} else {
+		spin_lock(&idxd->dev_lock);
+	}
 
-	idxd_device_wqs_clear_state(idxd);
-	spin_lock(&idxd->dev_lock);
-	idxd_groups_clear_state(idxd);
-	idxd_engines_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
 	spin_unlock(&idxd->dev_lock);
 }
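
For readability, the function as it reads once this hunk is applied (reconstructed from the + lines above): both branches leave dev_lock held, so the final state write is always serialized with other device-state updates.

void idxd_device_clear_state(struct idxd_device *idxd)
{
	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		/*
		 * Clearing wq state is protected by wq lock.
		 * So no need to be protected by device lock.
		 */
		idxd_device_wqs_clear_state(idxd);

		spin_lock(&idxd->dev_lock);
		idxd_groups_clear_state(idxd);
		idxd_engines_clear_state(idxd);
	} else {
		spin_lock(&idxd->dev_lock);
	}

	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}
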
@@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 
 	/* bytes 12-15 */
 	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
-	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
 
 	/* bytes 32-63 */
 	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
@@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
 	wq->priority = wq->wqcfg->priority;
 
 	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
-	wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
 
 	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
 		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 1196ab342f01..7ced8d283d98 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -548,6 +548,38 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
 	return wq->client_count;
 };
 
+/*
+ * Intel IAA does not support batch processing.
+ * The max batch size of device, max batch size of wq and
+ * max batch shift of wqcfg should be always 0 on IAA.
+ */
+static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
+					   u32 max_batch_size)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		idxd->max_batch_size = 0;
+	else
+		idxd->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
+					      u32 max_batch_size)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		wq->max_batch_size = 0;
+	else
+		wq->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
+						  u32 max_batch_shift)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		wqcfg->max_batch_shift = 0;
+	else
+		wqcfg->max_batch_shift = max_batch_shift;
+}
+
 int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
 					struct module *module, const char *mod_name);
 #define idxd_driver_register(driver) \
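
A quick sketch of what these helpers are meant to do (hypothetical values; IDXD_TYPE_DSA and IDXD_TYPE_IAX are the driver's existing device-type enum values):

	struct idxd_wq wq = {};

	idxd_wq_set_max_batch_size(IDXD_TYPE_DSA, &wq, 32);	/* wq.max_batch_size == 32 */
	idxd_wq_set_max_batch_size(IDXD_TYPE_IAX, &wq, 32);	/* wq.max_batch_size == 0  */

idxd_set_max_batch_size() and idxd_wqcfg_set_max_batch_shift() follow the same pattern for the device-wide limit and the WQCFG register image, so an IAA device never ends up with a non-zero batch setting anywhere.
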
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 2c0fcfdc75c7..529ea09c9094 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -182,7 +182,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
 		init_completion(&wq->wq_dead);
 		init_completion(&wq->wq_resurrect);
 		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
 		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
 		if (!wq->wqcfg) {
@@ -417,7 +417,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
 
 	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
 	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
-	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
 	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
 	if (idxd->hw.gen_cap.config_en)
 		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index bdaccf9e0436..7269bd54554f 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1046,7 +1046,7 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
 	if (batch_size > idxd->max_batch_size)
 		return -EINVAL;
 
-	wq->max_batch_size = (u32)batch_size;
+	idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);
 
 	return count;
 }
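
One user-visible consequence of the check above combined with idxd_set_max_batch_size(): on IAA the device-wide max_batch_size is forced to 0, so any non-zero value written to a wq's max_batch_size attribute fails the batch_size > idxd->max_batch_size test with -EINVAL. A minimal userspace sketch follows; the sysfs path assumes the usual dsa bus layout and an example wq name, so adjust to the actual device.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical wq node; real systems use wq<device>.<wq> names. */
	int fd = open("/sys/bus/dsa/devices/wq0.0/max_batch_size", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * On IAA any non-zero value is rejected with EINVAL by the check
	 * shown above; on DSA it is still subject to the device limit and
	 * the driver's other wq configuration checks.
	 */
	if (write(fd, "32", 2) < 0)
		perror("write");

	close(fd);
	return 0;
}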