Diffstat (limited to 'drivers/dma/idxd')
-rw-r--r--  drivers/dma/idxd/device.c  10
-rw-r--r--  drivers/dma/idxd/idxd.h     3
-rw-r--r--  drivers/dma/idxd/init.c     2
-rw-r--r--  drivers/dma/idxd/irq.c      2
-rw-r--r--  drivers/dma/idxd/sysfs.c   95
5 files changed, 108 insertions, 4 deletions
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index b75d699160bf..200b9109cacf 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -368,6 +368,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
__func__, cmd_code, operand);
+ idxd->cmd_status = 0;
__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
idxd->cmd_done = &done;
iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
@@ -379,8 +380,11 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
spin_unlock_irqrestore(&idxd->dev_lock, flags);
wait_for_completion(&done);
spin_lock_irqsave(&idxd->dev_lock, flags);
- if (status)
+ if (status) {
*status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
+ idxd->cmd_status = *status & GENMASK(7, 0);
+ }
+
__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
/* Wake up other pending commands */
wake_up(&idxd->cmd_waitq);
@@ -555,8 +559,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg.priority = wq->priority;
/* bytes 12-15 */
- wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift;
- wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift;
+ wq->wqcfg.max_xfer_shift = ilog2(wq->max_xfer_bytes);
+ wq->wqcfg.max_batch_shift = ilog2(wq->max_batch_size);
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
for (i = 0; i < 8; i++) {
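Note: the two wqcfg assignments above store the per-WQ limits as power-of-two shifts. Because the sysfs store paths below round the user value up with roundup_pow_of_two(), ilog2() recovers the exponent exactly. A standalone user-space sketch of that round-trip (illustrative only, not driver code; the helper names are made up here, the kernel uses roundup_pow_of_two() and ilog2()):

#include <stdint.h>
#include <stdio.h>

/* Round v up to the next power of two, like the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow2(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Integer log2 of a power of two, mirroring the kernel's ilog2(). */
static unsigned int log2_u64(uint64_t v)
{
	unsigned int s = 0;

	while (v >>= 1)
		s++;
	return s;
}

int main(void)
{
	/* e.g. a user writes 2000000; it is rounded up to 2097152 (2 MiB) */
	uint64_t xfer = roundup_pow2(2000000);

	/* 2097152 encodes as shift 21, the value that lands in wqcfg.max_xfer_shift */
	printf("%llu -> shift %u\n", (unsigned long long)xfer, log2_u64(xfer));
	return 0;
}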
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index e62b4799d189..c64df197e724 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -114,6 +114,8 @@ struct idxd_wq {
struct sbitmap_queue sbq;
struct dma_chan dma_chan;
char name[WQ_NAME_SIZE + 1];
+ u64 max_xfer_bytes;
+ u32 max_batch_size;
};
struct idxd_engine {
@@ -154,6 +156,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
+ u8 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index c7c61974f20f..11e5ce168177 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -176,6 +176,8 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->idxd = idxd;
mutex_init(&wq->wq_lock);
wq->idxd_cdev.minor = -1;
+ wq->max_xfer_bytes = idxd->max_xfer_bytes;
+ wq->max_batch_size = idxd->max_batch_size;
}
for (i = 0; i < idxd->max_engines; i++) {
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 1e9e6991f543..17a65a13fb64 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -64,6 +64,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
bool err = false;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
@@ -121,7 +122,6 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
val);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (!err)
goto out;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index dcba60953217..07a5db06a29a 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1064,6 +1064,89 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
static struct device_attribute dev_attr_wq_cdev_minor =
__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
+static int __get_sysfs_u64(const char *buf, u64 *val)
+{
+ int rc;
+
+ rc = kstrtou64(buf, 0, val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (*val == 0)
+ return -EINVAL;
+
+ *val = roundup_pow_of_two(*val);
+ return 0;
+}
+
+static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
+}
+
+static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ u64 xfer_size;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ rc = __get_sysfs_u64(buf, &xfer_size);
+ if (rc < 0)
+ return rc;
+
+ if (xfer_size > idxd->max_xfer_bytes)
+ return -EINVAL;
+
+ wq->max_xfer_bytes = xfer_size;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_max_transfer_size =
+ __ATTR(max_transfer_size, 0644,
+ wq_max_transfer_size_show, wq_max_transfer_size_store);
+
+static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->max_batch_size);
+}
+
+static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ u64 batch_size;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ rc = __get_sysfs_u64(buf, &batch_size);
+ if (rc < 0)
+ return rc;
+
+ if (batch_size > idxd->max_batch_size)
+ return -EINVAL;
+
+ wq->max_batch_size = (u32)batch_size;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_max_batch_size =
+ __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1074,6 +1157,8 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_type.attr,
&dev_attr_wq_name.attr,
&dev_attr_wq_cdev_minor.attr,
+ &dev_attr_wq_max_transfer_size.attr,
+ &dev_attr_wq_max_batch_size.attr,
NULL,
};
@@ -1317,6 +1402,15 @@ static ssize_t cdev_major_show(struct device *dev,
}
static DEVICE_ATTR_RO(cdev_major);
+static ssize_t cmd_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#x\n", idxd->cmd_status);
+}
+static DEVICE_ATTR_RO(cmd_status);
+
static struct attribute *idxd_device_attributes[] = {
&dev_attr_version.attr,
&dev_attr_max_groups.attr,
@@ -1335,6 +1429,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_max_tokens.attr,
&dev_attr_token_limit.attr,
&dev_attr_cdev_major.attr,
+ &dev_attr_cmd_status.attr,
NULL,
};
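Note: the new attributes are exposed per work queue (and cmd_status per device) on the dsa bus. A minimal user-space sketch of driving them; the device and wq names (dsa0, wq0.0) are examples and depend on the system, and the sysfs paths assume the standard dsa bus layout:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

/* Read a sysfs attribute into buf, NUL-terminated; returns 0 on success. */
static int sysfs_read(const char *path, char *buf, size_t len)
{
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);
	close(fd);
	if (n < 0)
		return -1;
	buf[n] = '\0';
	return 0;
}

int main(void)
{
	char buf[32];

	/*
	 * Example paths; actual device/wq names vary. The stores only succeed
	 * while the wq is disabled, and values above the device capabilities
	 * are rejected with -EINVAL.
	 */
	sysfs_write("/sys/bus/dsa/devices/wq0.0/max_transfer_size", "2097152");
	sysfs_write("/sys/bus/dsa/devices/wq0.0/max_batch_size", "32");

	/* Low byte of the last device command's status register; 0 means success. */
	if (sysfs_read("/sys/bus/dsa/devices/dsa0/cmd_status", buf, sizeof(buf)) == 0)
		printf("cmd_status: %s", buf);
	return 0;
}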