path: root/drivers/dma/ioat
author     Linus Torvalds <torvalds@linux-foundation.org>   2013-05-09 09:46:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-05-09 09:46:45 -0700
commit     1763e735b0a093a6747078b3bd101f079e576ab6 (patch)
tree       75203a3229977d12dc5a5990d1122e7a3d5f30fc /drivers/dma/ioat
parent     b29bdba51924f6fd5971352ba111784dee3a5853 (diff)
parent     3065c194670b61e213656ce25976d7c8a95e3c93 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
 "This time we have dmatest improvements from Andy along with dw_dmac
  fixes. He has also added ACPI support for dmaengine. We also have a
  bunch of fixes to dmaengine DT support from various folks, plus
  Haswell and other ioat changes from Dave and SUDMAC support from
  Shimoda."

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
  dma: tegra: implement suspend/resume callbacks
  dma:of: Use a mutex to protect the of_dma_list
  dma: of: Fix of_node reference leak
  dmaengine: sirf: move driver init from module_init to subsys_initcall
  sudmac: add support for SUDMAC
  dma: sh: add Kconfig
  at_hdmac: move to generic DMA binding
  ioatdma: ioat3_alloc_sed can be static
  ioatdma: Adding write back descriptor error status support for ioatdma 3.3
  ioatdma: S1200 platforms ioatdma channel 2 and 3 falsely advertise RAID cap
  ioatdma: Adding support for 16 src PQ ops and super extended descriptors
  ioatdma: Removing hw bug workaround for CB3.x .2 and earlier
  dw_dmac: add ACPI support
  dmaengine: call acpi_dma_request_slave_channel as well
  dma: acpi-dma: introduce ACPI DMA helpers
  dma: of: Remove unnecessary list_empty check
  DMA: OF: Check properties value before running be32_to_cpup() on it
  DMA: of: Constant names
  ioatdma: skip silicon bug workaround for pq_align for cb3.3
  ioatdma: Removing PQ val disable for cb3.3
  ...
Diffstat (limited to 'drivers/dma/ioat')
-rw-r--r--  drivers/dma/ioat/dma.c        |   8
-rw-r--r--  drivers/dma/ioat/dma.h        |  53
-rw-r--r--  drivers/dma/ioat/dma_v2.h     |   2
-rw-r--r--  drivers/dma/ioat/dma_v3.c     | 912
-rw-r--r--  drivers/dma/ioat/hw.h         |  88
-rw-r--r--  drivers/dma/ioat/pci.c        |  20
-rw-r--r--  drivers/dma/ioat/registers.h  |   4
7 files changed, 950 insertions(+), 137 deletions(-)
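
One user-visible effect of the ioatdma patches below is that dma_maxpq() can report up to 16 sources on CB3.3 parts. A minimal client-side sketch of honouring that limit; my_submit_pq() is a hypothetical helper, not part of this series:

	/* sketch: cap the number of P+Q sources to what the engine advertises */
	static struct dma_async_tx_descriptor *
	my_submit_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
	{
		struct dma_device *dma = chan->device;

		if (src_cnt > dma_maxpq(dma, flags))
			return NULL;	/* caller must split the operation */

		return dma->device_prep_dma_pq(chan, dst, src, src_cnt, scf,
					       len, flags);
	}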
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1879a5942bfc..17a2393b3e25 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -892,7 +892,7 @@ MODULE_PARM_DESC(ioat_interrupt_style,
* ioat_dma_setup_interrupts - setup interrupt handler
* @device: ioat device
*/
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
struct ioat_chan_common *chan;
struct pci_dev *pdev = device->pdev;
@@ -941,6 +941,7 @@ msix:
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+ device->irq_mode = IOAT_MSIX;
goto done;
msix_single_vector:
@@ -956,6 +957,7 @@ msix_single_vector:
pci_disable_msix(pdev);
goto msi;
}
+ device->irq_mode = IOAT_MSIX_SINGLE;
goto done;
msi:
@@ -969,6 +971,7 @@ msi:
pci_disable_msi(pdev);
goto intx;
}
+ device->irq_mode = IOAT_MSI;
goto done;
intx:
@@ -977,6 +980,7 @@ intx:
if (err)
goto err_no_irq;
+ device->irq_mode = IOAT_INTX;
done:
if (device->intr_quirk)
device->intr_quirk(device);
@@ -987,9 +991,11 @@ done:
err_no_irq:
/* Disable all interrupt generation */
writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+ device->irq_mode = IOAT_NOIRQ;
dev_err(dev, "no usable interrupts\n");
return err;
}
+EXPORT_SYMBOL(ioat_dma_setup_interrupts);
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
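
The point of recording irq_mode in every branch of ioat_dma_setup_interrupts() above is that a later teardown or re-init can undo exactly what setup did. A minimal sketch of the symmetric teardown, assuming the devm-managed IRQs have already been freed; my_teardown_interrupts() is illustrative, the driver's actual helper (ioat3_irq_reinit()) appears in the dma_v3.c hunks below:

	/* sketch: undo only the interrupt mode that was actually set up */
	static void my_teardown_interrupts(struct ioatdma_device *device)
	{
		struct pci_dev *pdev = device->pdev;

		switch (device->irq_mode) {
		case IOAT_MSIX:
		case IOAT_MSIX_SINGLE:
			pci_disable_msix(pdev);
			break;
		case IOAT_MSI:
			pci_disable_msi(pdev);
			break;
		case IOAT_INTX:
		case IOAT_NOIRQ:
		default:
			break;		/* nothing PCI-level to undo */
		}

		device->irq_mode = IOAT_NOIRQ;
	}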
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 53a4cbb78f47..54fb7b9ff9aa 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -39,6 +39,7 @@
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
@@ -48,6 +49,14 @@
*/
#define NULL_DESC_BUFFER_SIZE 1
+enum ioat_irq_mode {
+ IOAT_NOIRQ = 0,
+ IOAT_MSIX,
+ IOAT_MSIX_SINGLE,
+ IOAT_MSI,
+ IOAT_INTX
+};
+
/**
* struct ioatdma_device - internal representation of a IOAT device
* @pdev: PCI-Express device
@@ -72,11 +81,16 @@ struct ioatdma_device {
void __iomem *reg_base;
struct pci_pool *dma_pool;
struct pci_pool *completion_pool;
+#define MAX_SED_POOLS 5
+ struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
+ struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
struct ioat_chan_common *idx[4];
struct dca_provider *dca;
+ enum ioat_irq_mode irq_mode;
+ u32 cap;
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
@@ -131,6 +145,20 @@ struct ioat_dma_chan {
u16 active;
};
+/**
+ * struct ioat_sed_ent - wrapper around super extended hardware descriptor
+ * @hw: hardware SED
+ * @dma: dma address for the SED
+ * @parent: point to the dma descriptor that's the parent
+ * @hw_pool: index of the hardware descriptor pool this SED was allocated from
+ */
+struct ioat_sed_ent {
+ struct ioat_sed_raw_descriptor *hw;
+ dma_addr_t dma;
+ struct ioat_ring_ent *parent;
+ unsigned int hw_pool;
+};
+
static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
return container_of(c, struct ioat_chan_common, common);
@@ -179,7 +207,7 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
struct device *dev = to_dev(chan);
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
- " ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
+ " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
(unsigned long long) tx->phys,
(unsigned long long) hw->next, tx->cookie, tx->flags,
hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
@@ -201,7 +229,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index)
return device->idx[index];
}
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
u64 status;
@@ -218,6 +246,26 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan)
return status;
}
+#if BITS_PER_LONG == 64
+
+static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+{
+ u8 ver = chan->device->version;
+ u64 status;
+
+ /* With IOAT v3.3 the status register is 64bit. */
+ if (ver >= IOAT_VER_3_3)
+ status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+ else
+ status = ioat_chansts_32(chan);
+
+ return status;
+}
+
+#else
+#define ioat_chansts ioat_chansts_32
+#endif
+
static inline void ioat_start(struct ioat_chan_common *chan)
{
u8 ver = chan->device->version;
@@ -321,6 +369,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
+int ioat_dma_setup_interrupts(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
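
The ioat_chansts()/ioat_chansts_32() split above follows a common MMIO pattern: a 64-bit status register is read with a single readq() only where the CPU can issue one atomically, with a fall back to 32-bit accesses elsewhere. A generic sketch of such a fallback, not the driver's exact helper (which also has to cope with pre-3.3 register layouts):

	/* sketch: read a 64-bit MMIO register as two 32-bit halves */
	static inline u64 read_status_lo_hi(void __iomem *reg)
	{
		u64 lo = readl(reg);
		u64 hi = readl(reg + 4);

		return (hi << 32) | lo;
	}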
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index e100f644e344..29bf9448035d 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -137,6 +137,7 @@ struct ioat_ring_ent {
#ifdef DEBUG
int id;
#endif
+ struct ioat_sed_ent *sed;
};
static inline struct ioat_ring_ent *
@@ -157,6 +158,7 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
+void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e8336cce360b..ca6ea9b3551b 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -55,7 +55,7 @@
/*
* Support routines for v3+ hardware
*/
-
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
@@ -70,6 +70,10 @@
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
@@ -77,7 +81,20 @@
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6 };
+
+/*
+ * technically sources 1 and 2 do not require SED, but the op will have
+ * at least 9 descriptors so that's irrelevant.
+ */
+static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1 };
+
+static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
@@ -101,6 +118,13 @@ static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
return raw->field[pq_idx_to_field[idx]];
}
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ return raw->field[pq16_idx_to_field[idx]];
+}
+
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, u8 coef, int idx)
{
@@ -111,6 +135,167 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
pq->coef[idx] = coef;
}
+static int sed_get_pq16_pool_idx(int src_cnt)
+{
+
+ return pq16_idx_to_sed[src_cnt];
+}
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+ case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+ return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+ is_hsw_ioat(pdev);
+}
+
+static bool is_bwd_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+ case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+ return true;
+ default:
+ return false;
+ }
+
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+ dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+ struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+ struct ioat_pq16a_descriptor *pq16 =
+ (struct ioat_pq16a_descriptor *)desc[1];
+ struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+ raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+ if (idx < 8)
+ pq->coef[idx] = coef;
+ else
+ pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
+{
+ struct ioat_sed_ent *sed;
+ gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+ sed = kmem_cache_alloc(device->sed_pool, flags);
+ if (!sed)
+ return NULL;
+
+ sed->hw_pool = hw_pool;
+ sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
+ flags, &sed->dma);
+ if (!sed->hw) {
+ kmem_cache_free(device->sed_pool, sed);
+ return NULL;
+ }
+
+ return sed;
+}
+
+static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
+{
+ if (!sed)
+ return;
+
+ dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+ kmem_cache_free(device->sed_pool, sed);
+}
+
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc, int idx)
{
@@ -223,6 +408,54 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
}
break;
}
+ case IOAT_OP_PQ_16S:
+ case IOAT_OP_PQ_VAL_16S: {
+ struct ioat_pq_descriptor *pq = desc->pq;
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ struct ioat_raw_descriptor *descs[4];
+ int i;
+
+ /* in the 'continue' case don't unmap the dests as sources */
+ if (dmaf_p_disabled_continue(flags))
+ src_cnt--;
+ else if (dmaf_continue(flags))
+ src_cnt -= 3;
+
+ if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ descs[0] = (struct ioat_raw_descriptor *)pq;
+ descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
+ descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
+ for (i = 0; i < src_cnt; i++) {
+ dma_addr_t src = pq16_get_src(descs, i);
+
+ ioat_unmap(pdev, src - offset, len,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+ /* the dests are sources in pq validate operations */
+ if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset,
+ len, PCI_DMA_TODEVICE,
+ flags, 0);
+ break;
+ }
+ }
+
+ if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ ioat_unmap(pdev, pq->q_addr - offset, len,
+ PCI_DMA_BIDIRECTIONAL, flags, 1);
+ }
+ break;
+ }
default:
dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
__func__, desc->hw->ctl_f.op);
@@ -250,6 +483,63 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
return false;
}
+static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
+{
+ u64 phys_complete;
+ u64 completion;
+
+ completion = *chan->completion;
+ phys_complete = ioat_chansts_to_addr(completion);
+
+ dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
+ (unsigned long long) phys_complete);
+
+ return phys_complete;
+}
+
+static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
+ u64 *phys_complete)
+{
+ *phys_complete = ioat3_get_current_completion(chan);
+ if (*phys_complete == chan->last_completion)
+ return false;
+
+ clear_bit(IOAT_COMPLETION_ACK, &chan->state);
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+ return true;
+}
+
+static void
+desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
+{
+ struct ioat_dma_descriptor *hw = desc->hw;
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ {
+ struct ioat_pq_descriptor *pq = desc->pq;
+
+ /* check if there's error written */
+ if (!pq->dwbes_f.wbes)
+ return;
+
+ /* need to set a chanerr var for checking to clear later */
+
+ if (pq->dwbes_f.p_val_err)
+ *desc->result |= SUM_CHECK_P_RESULT;
+
+ if (pq->dwbes_f.q_val_err)
+ *desc->result |= SUM_CHECK_Q_RESULT;
+
+ return;
+ }
+ default:
+ return;
+ }
+}
+
/**
* __cleanup - reclaim used descriptors
* @ioat: channel (ring) to clean
@@ -260,6 +550,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
bool seen_current = false;
int idx = ioat->tail, i;
@@ -268,6 +559,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
+ /*
+ * At restart of the channel, the completion address and the
+ * channel status will be 0 due to starting a new chain. Since
+ * it's new chain and the first descriptor "fails", there is
+ * nothing to clean up. We do not want to reap the entire submitted
+ * chain due to this 0 address value and then BUG.
+ */
+ if (!phys_complete)
+ return;
+
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
@@ -276,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
desc = ioat2_get_ring_ent(ioat, idx + i);
dump_desc_dbg(ioat, desc);
+
+ /* set err stat if we are using dwbes */
+ if (device->cap & IOAT_CAP_DWBES)
+ desc_get_errstat(ioat, desc);
+
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
@@ -294,6 +600,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
BUG_ON(i + 1 >= active);
i++;
}
+
+ /* cleanup super extended descriptors */
+ if (desc->sed) {
+ ioat3_free_sed(device, desc->sed);
+ desc->sed = NULL;
+ }
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat->tail = idx + i;
@@ -314,11 +626,22 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
spin_lock_bh(&chan->cleanup_lock);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
+
+ if (is_ioat_halted(*chan->completion)) {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+ if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+ mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+ ioat3_eh(ioat);
+ }
+ }
+
spin_unlock_bh(&chan->cleanup_lock);
}
@@ -333,15 +656,78 @@ static void ioat3_cleanup_event(unsigned long data)
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- dma_addr_t phys_complete;
+ u64 phys_complete;
ioat2_quiesce(chan, 0);
- if (ioat_cleanup_preamble(chan, &phys_complete))
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
__ioat2_restart_chan(ioat);
}
+static void ioat3_eh(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ struct pci_dev *pdev = to_pdev(chan);
+ struct ioat_dma_descriptor *hw;
+ u64 phys_complete;
+ struct ioat_ring_ent *desc;
+ u32 err_handled = 0;
+ u32 chanerr_int;
+ u32 chanerr;
+
+ /* cleanup so tail points to descriptor that caused the error */
+ if (ioat3_cleanup_preamble(chan, &phys_complete))
+ __cleanup(ioat, phys_complete);
+
+ chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+ dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
+ __func__, chanerr, chanerr_int);
+
+ desc = ioat2_get_ring_ent(ioat, ioat->tail);
+ hw = desc->hw;
+ dump_desc_dbg(ioat, desc);
+
+ switch (hw->ctl_f.op) {
+ case IOAT_OP_XOR_VAL:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ break;
+ case IOAT_OP_PQ_VAL:
+ case IOAT_OP_PQ_VAL_16S:
+ if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+ *desc->result |= SUM_CHECK_P_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+ }
+ if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+ *desc->result |= SUM_CHECK_Q_RESULT;
+ err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+ }
+ break;
+ }
+
+ /* fault on unhandled error or spurious halt */
+ if (chanerr ^ err_handled || chanerr == 0) {
+ dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
+ __func__, chanerr, err_handled);
+ BUG();
+ }
+
+ writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
+ pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+ /* mark faulting descriptor as complete */
+ *chan->completion = desc->txd.phys;
+
+ spin_lock_bh(&ioat->prep_lock);
+ ioat3_restart_channel(ioat);
+ spin_unlock_bh(&ioat->prep_lock);
+}
+
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
@@ -605,7 +991,8 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct
int i;
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
- " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
+ " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
desc_id(desc), (unsigned long long) desc->txd.phys,
(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
@@ -617,6 +1004,42 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct
(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+ dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
+ struct ioat_ring_ent *desc)
+{
+ struct device *dev = to_dev(&ioat->base);
+ struct ioat_pq_descriptor *pq = desc->pq;
+ struct ioat_raw_descriptor *descs[] = { (void *)pq,
+ (void *)pq,
+ (void *)pq };
+ int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+ int i;
+
+ if (desc->sed) {
+ descs[1] = (void *)desc->sed->hw;
+ descs[2] = (void *)desc->sed->hw + 64;
+ }
+
+ dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+ " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+ " src_cnt: %d)\n",
+ desc_id(desc), (unsigned long long) desc->txd.phys,
+ (unsigned long long) pq->next,
+ desc->txd.flags, pq->size, pq->ctl,
+ pq->ctl_f.op, pq->ctl_f.int_en,
+ pq->ctl_f.compl_write,
+ pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+ pq->ctl_f.src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+ (unsigned long long) pq16_get_src(descs, i),
+ pq->coef[i]);
+ }
+ dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+ dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
@@ -627,6 +1050,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *compl_desc;
struct ioat_ring_ent *desc;
struct ioat_ring_ent *ext;
@@ -637,6 +1061,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
u32 offset = 0;
u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
int i, s, idx, with_ext, num_descs;
+ int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
dev_dbg(to_dev(chan), "%s\n", __func__);
/* the engine requires at least two sources (we provide
@@ -662,7 +1087,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
* order.
*/
if (likely(num_descs) &&
- ioat2_check_space_lock(ioat, num_descs+1) == 0)
+ ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
idx = ioat->head;
else
return NULL;
@@ -700,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
pq->q_addr = dst[1] + offset;
pq->ctl = 0;
pq->ctl_f.op = op;
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
pq->ctl_f.src_cnt = src_cnt_to_hw(s);
pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
@@ -716,26 +1144,140 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
dump_pq_desc_dbg(ioat, desc, ext);
- /* completion descriptor carries interrupt bit */
- compl_desc = ioat2_get_ring_ent(ioat, idx + i);
- compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
- hw = compl_desc->hw;
- hw->ctl = 0;
- hw->ctl_f.null = 1;
- hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
- hw->ctl_f.compl_write = 1;
- hw->size = NULL_DESC_BUFFER_SIZE;
- dump_desc_dbg(ioat, compl_desc);
+ if (!cb32) {
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+ compl_desc = desc;
+ } else {
+ /* completion descriptor carries interrupt bit */
+ compl_desc = ioat2_get_ring_ent(ioat, idx + i);
+ compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+ hw = compl_desc->hw;
+ hw->ctl = 0;
+ hw->ctl_f.null = 1;
+ hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ hw->ctl_f.compl_write = 1;
+ hw->size = NULL_DESC_BUFFER_SIZE;
+ dump_desc_dbg(ioat, compl_desc);
+ }
+
/* we leave the channel locked to ensure in order submission */
return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
+__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+ const dma_addr_t *dst, const dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioatdma_device *device = chan->device;
+ struct ioat_ring_ent *desc;
+ size_t total_len = len;
+ struct ioat_pq_descriptor *pq;
+ u32 offset = 0;
+ u8 op;
+ int i, s, idx, num_descs;
+
+ /* this function only handles src_cnt 9 - 16 */
+ BUG_ON(src_cnt < 9);
+
+ /* this function is only called with 9-16 sources */
+ op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+ dev_dbg(to_dev(chan), "%s\n", __func__);
+
+ num_descs = ioat2_xferlen_to_descs(ioat, len);
+
+ /*
+ * 16 source pq is only available on cb3.3 and has no completion
+ * write hw bug.
+ */
+ if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
+ idx = ioat->head;
+ else
+ return NULL;
+
+ i = 0;
+
+ do {
+ struct ioat_raw_descriptor *descs[4];
+ size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
+
+ desc = ioat2_get_ring_ent(ioat, idx + i);
+ pq = desc->pq;
+
+ descs[0] = (struct ioat_raw_descriptor *) pq;
+
+ desc->sed = ioat3_alloc_sed(device,
+ sed_get_pq16_pool_idx(src_cnt));
+ if (!desc->sed) {
+ dev_err(to_dev(chan),
+ "%s: no free sed entries\n", __func__);
+ return NULL;
+ }
+
+ pq->sed_addr = desc->sed->dma;
+ desc->sed->parent = desc;
+
+ descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+ descs[2] = (void *)descs[1] + 64;
+
+ for (s = 0; s < src_cnt; s++)
+ pq16_set_src(descs, src[s], offset, scf[s], s);
+
+ /* see the comment for dma_maxpq in include/linux/dmaengine.h */
+ if (dmaf_p_disabled_continue(flags))
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ else if (dmaf_continue(flags)) {
+ pq16_set_src(descs, dst[0], offset, 0, s++);
+ pq16_set_src(descs, dst[1], offset, 1, s++);
+ pq16_set_src(descs, dst[1], offset, 0, s++);
+ }
+
+ pq->size = xfer_size;
+ pq->p_addr = dst[0] + offset;
+ pq->q_addr = dst[1] + offset;
+ pq->ctl = 0;
+ pq->ctl_f.op = op;
+ pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+ /* we turn on descriptor write back error status */
+ if (device->cap & IOAT_CAP_DWBES)
+ pq->ctl_f.wb_en = result ? 1 : 0;
+ pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+ pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+ len -= xfer_size;
+ offset += xfer_size;
+ } while (++i < num_descs);
+
+ /* last pq descriptor carries the unmap parameters and fence bit */
+ desc->txd.flags = flags;
+ desc->len = total_len;
+ if (result)
+ desc->result = result;
+ pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+ /* with cb3.3 we should be able to do completion w/o a null desc */
+ pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+ pq->ctl_f.compl_write = 1;
+
+ dump_pq16_desc_dbg(ioat, desc);
+
+ /* we leave the channel locked to ensure in order submission */
+ return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@@ -755,11 +1297,20 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
- return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
- single_source_coef, len, flags);
- } else
- return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
- len, flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
+ 2, single_source_coef, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
+ single_source_coef, len, flags);
+
+ } else {
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+ scf, len, flags);
+ }
}
struct dma_async_tx_descriptor *
@@ -767,6 +1318,8 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
+
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@@ -778,14 +1331,18 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
*/
*pqres = 0;
- return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+ flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -794,8 +1351,11 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
- flags);
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags) :
+ __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+ flags);
}
struct dma_async_tx_descriptor *
@@ -803,6 +1363,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
+ struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@@ -816,8 +1377,12 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
- return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
- len, flags);
+
+ return (src_cnt > 8) && (dma->max_pq > 8) ?
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags) :
+ __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+ scf, len, flags);
}
static struct dma_async_tx_descriptor *
@@ -1167,6 +1732,56 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
return 0;
}
+static int ioat3_irq_reinit(struct ioatdma_device *device)
+{
+ int msixcnt = device->common.chancnt;
+ struct pci_dev *pdev = device->pdev;
+ int i;
+ struct msix_entry *msix;
+ struct ioat_chan_common *chan;
+ int err = 0;
+
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+
+ for (i = 0; i < msixcnt; i++) {
+ msix = &device->msix_entries[i];
+ chan = ioat_chan_by_index(device, i);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ }
+
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSIX_SINGLE:
+ msix = &device->msix_entries[0];
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, msix->vector, chan);
+ pci_disable_msix(pdev);
+ break;
+
+ case IOAT_MSI:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ pci_disable_msi(pdev);
+ break;
+
+ case IOAT_INTX:
+ chan = ioat_chan_by_index(device, 0);
+ devm_free_irq(&pdev->dev, pdev->irq, chan);
+ break;
+
+ default:
+ return 0;
+ }
+
+ device->irq_mode = IOAT_NOIRQ;
+
+ err = ioat_dma_setup_interrupts(device);
+
+ return err;
+}
+
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
/* throw away whatever the channel was doing and get it
@@ -1183,80 +1798,65 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
- /* clear any pending errors */
- err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (device->version < IOAT_VER_3_3) {
+ /* clear any pending errors */
+ err = pci_read_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+ if (err) {
+ dev_err(&pdev->dev,
+ "channel error register unreachable\n");
+ return err;
+ }
+ pci_write_config_dword(pdev,
+ IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+ /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+ * (workaround for spurious config parity error after restart)
+ */
+ pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+ if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+ pci_write_config_dword(pdev,
+ IOAT_PCI_DMAUNCERRSTS_OFFSET,
+ 0x10);
+ }
+ }
+
+ err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
if (err) {
- dev_err(&pdev->dev, "channel error register unreachable\n");
+ dev_err(&pdev->dev, "Failed to reset!\n");
return err;
}
- pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
- /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
- * (workaround for spurious config parity error after restart)
- */
- pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
- if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
- pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
+ if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
+ err = ioat3_irq_reinit(device);
- return ioat2_reset_sync(chan, msecs_to_jiffies(200));
+ return err;
}
-static bool is_jf_ioat(struct pci_dev *pdev)
+static void ioat3_intr_quirk(struct ioatdma_device *device)
{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
- case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
- return true;
- default:
- return false;
- }
-}
+ struct dma_device *dma;
+ struct dma_chan *c;
+ struct ioat_chan_common *chan;
+ u32 errmask;
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
- case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
- return true;
- default:
- return false;
- }
-}
+ dma = &device->common;
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
- case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
- return true;
- default:
- return false;
+ /*
+ * if we have descriptor write back error status, we mask the
+ * error interrupts
+ */
+ if (device->cap & IOAT_CAP_DWBES) {
+ list_for_each_entry(c, &dma->channels, device_node) {
+ chan = to_chan_common(c);
+ errmask = readl(chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+ IOAT_CHANERR_XOR_Q_ERR;
+ writel(errmask, chan->reg_base +
+ IOAT_CHANERR_MASK_OFFSET);
+ }
}
-
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
@@ -1268,30 +1868,33 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
struct ioat_chan_common *chan;
bool is_raid_device = false;
int err;
- u32 cap;
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat3_reset_hw;
device->self_test = ioat3_dma_self_test;
+ device->intr_quirk = ioat3_intr_quirk;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev))
+ if (is_xeon_cb32(pdev))
dma->copy_align = 6;
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
- cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+ device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+ if (is_bwd_noraid(pdev))
+ device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
/* dca is incompatible with raid operations */
- if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
- cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+ if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+ device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
- if (cap & IOAT_CAP_XOR) {
+ if (device->cap & IOAT_CAP_XOR) {
is_raid_device = true;
dma->max_xor = 8;
dma->xor_align = 6;
@@ -1302,53 +1905,86 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
}
- if (cap & IOAT_CAP_PQ) {
+
+ if (device->cap & IOAT_CAP_PQ) {
is_raid_device = true;
- dma_set_maxpq(dma, 8, 0);
- dma->pq_align = 6;
- dma_cap_set(DMA_PQ, dma->cap_mask);
dma->device_prep_dma_pq = ioat3_prep_pq;
-
- dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
+ dma_cap_set(DMA_PQ, dma->cap_mask);
+ dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
- if (!(cap & IOAT_CAP_XOR)) {
- dma->max_xor = 8;
- dma->xor_align = 6;
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma_set_maxpq(dma, 16, 0);
+ dma->pq_align = 0;
+ } else {
+ dma_set_maxpq(dma, 8, 0);
+ if (is_xeon_cb32(pdev))
+ dma->pq_align = 6;
+ else
+ dma->pq_align = 0;
+ }
- dma_cap_set(DMA_XOR, dma->cap_mask);
+ if (!(device->cap & IOAT_CAP_XOR)) {
dma->device_prep_dma_xor = ioat3_prep_pqxor;
-
- dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
+ dma_cap_set(DMA_XOR, dma->cap_mask);
+ dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ dma->max_xor = 16;
+ dma->xor_align = 0;
+ } else {
+ dma->max_xor = 8;
+ if (is_xeon_cb32(pdev))
+ dma->xor_align = 6;
+ else
+ dma->xor_align = 0;
+ }
}
}
- if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
+
+ if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) {
dma_cap_set(DMA_MEMSET, dma->cap_mask);
dma->device_prep_dma_memset = ioat3_prep_memset_lock;
}
- if (is_raid_device) {
- dma->device_tx_status = ioat3_tx_status;
- device->cleanup_fn = ioat3_cleanup_event;
- device->timer_fn = ioat3_timer_event;
- } else {
- dma->device_tx_status = ioat_dma_tx_status;
- device->cleanup_fn = ioat2_cleanup_event;
- device->timer_fn = ioat2_timer_event;
+ dma->device_tx_status = ioat3_tx_status;
+ device->cleanup_fn = ioat3_cleanup_event;
+ device->timer_fn = ioat3_timer_event;
+
+ if (is_xeon_cb32(pdev)) {
+ dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+ dma->device_prep_dma_xor_val = NULL;
+
+ dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+ dma->device_prep_dma_pq_val = NULL;
}
- #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
- dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
- dma->device_prep_dma_pq_val = NULL;
- #endif
+ /* starting with CB3.3 super extended descriptors are supported */
+ if (device->cap & IOAT_CAP_RAID16SS) {
+ char pool_name[14];
+ int i;
+
+ /* allocate sw descriptor pool for SED */
+ device->sed_pool = kmem_cache_create("ioat_sed",
+ sizeof(struct ioat_sed_ent), 0, 0, NULL);
+ if (!device->sed_pool)
+ return -ENOMEM;
+
+ for (i = 0; i < MAX_SED_POOLS; i++) {
+ snprintf(pool_name, 14, "ioat_hw%d_sed", i);
- #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
- dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
- dma->device_prep_dma_xor_val = NULL;
- #endif
+ /* allocate SED DMA pool */
+ device->sed_hw_pool[i] = dma_pool_create(pool_name,
+ &pdev->dev,
+ SED_SIZE * (i + 1), 64, 0);
+ if (!device->sed_hw_pool[i])
+ goto sed_pool_cleanup;
+
+ }
+ }
err = ioat_probe(device);
if (err)
@@ -1371,4 +2007,28 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
+
+sed_pool_cleanup:
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
+
+ return -ENOMEM;
+}
+
+void ioat3_dma_remove(struct ioatdma_device *device)
+{
+ if (device->sed_pool) {
+ int i;
+ kmem_cache_destroy(device->sed_pool);
+
+ for (i = 0; i < MAX_SED_POOLS; i++)
+ if (device->sed_hw_pool[i])
+ dma_pool_destroy(device->sed_hw_pool[i]);
+ }
}
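
A note on the SED pools created in ioat3_dma_probe() above: pool i is a dma_pool whose blocks are SED_SIZE * (i + 1) bytes, and sed_get_pq16_pool_idx() picks the smallest pool whose block can hold the extra source addresses for a given source count. A trivial sketch of that sizing rule, shown only for illustration:

	/* sketch: pool i was created with blocks of SED_SIZE * (i + 1) bytes */
	static inline size_t sed_pool_block_size(unsigned int hw_pool)
	{
		return SED_SIZE * (hw_pool + 1);
	}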
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 7cb74c62c719..5ee57d402a6e 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -30,11 +30,6 @@
#define IOAT_PCI_DID_SCNB 0x65FF
#define IOAT_PCI_DID_SNB 0x402F
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
-#define IOAT_VER_2_0 0x20 /* Version 2.0 */
-#define IOAT_VER_3_0 0x30 /* Version 3.0 */
-#define IOAT_VER_3_2 0x32 /* Version 3.2 */
-
#define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20
#define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21
#define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22
@@ -46,6 +41,29 @@
#define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e
#define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f
+
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
+#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53
+
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
+#define IOAT_VER_3_0 0x30 /* Version 3.0 */
+#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#define IOAT_VER_3_3 0x33 /* Version 3.3 */
+
+
int system_has_dca_enabled(struct pci_dev *pdev);
struct ioat_dma_descriptor {
@@ -147,7 +165,17 @@ struct ioat_xor_ext_descriptor {
};
struct ioat_pq_descriptor {
- uint32_t size;
+ union {
+ uint32_t size;
+ uint32_t dwbes;
+ struct {
+ unsigned int rsvd:25;
+ unsigned int p_val_err:1;
+ unsigned int q_val_err:1;
+ unsigned int rsvd1:4;
+ unsigned int wbes:1;
+ } dwbes_f;
+ };
union {
uint32_t ctl;
struct {
@@ -162,9 +190,14 @@ struct ioat_pq_descriptor {
unsigned int hint:1;
unsigned int p_disable:1;
unsigned int q_disable:1;
- unsigned int rsvd:11;
+ unsigned int rsvd2:2;
+ unsigned int wb_en:1;
+ unsigned int prl_en:1;
+ unsigned int rsvd3:7;
#define IOAT_OP_PQ 0x89
#define IOAT_OP_PQ_VAL 0x8a
+ #define IOAT_OP_PQ_16S 0xa0
+ #define IOAT_OP_PQ_VAL_16S 0xa1
unsigned int op:8;
} ctl_f;
};
@@ -172,7 +205,10 @@ struct ioat_pq_descriptor {
uint64_t p_addr;
uint64_t next;
uint64_t src_addr2;
- uint64_t src_addr3;
+ union {
+ uint64_t src_addr3;
+ uint64_t sed_addr;
+ };
uint8_t coef[8];
uint64_t q_addr;
};
@@ -221,4 +257,40 @@ struct ioat_pq_update_descriptor {
struct ioat_raw_descriptor {
uint64_t field[8];
};
+
+struct ioat_pq16a_descriptor {
+ uint8_t coef[8];
+ uint64_t src_addr3;
+ uint64_t src_addr4;
+ uint64_t src_addr5;
+ uint64_t src_addr6;
+ uint64_t src_addr7;
+ uint64_t src_addr8;
+ uint64_t src_addr9;
+};
+
+struct ioat_pq16b_descriptor {
+ uint64_t src_addr10;
+ uint64_t src_addr11;
+ uint64_t src_addr12;
+ uint64_t src_addr13;
+ uint64_t src_addr14;
+ uint64_t src_addr15;
+ uint64_t src_addr16;
+ uint64_t rsvd;
+};
+
+union ioat_sed_pq_descriptor {
+ struct ioat_pq16a_descriptor a;
+ struct ioat_pq16b_descriptor b;
+};
+
+#define SED_SIZE 64
+
+struct ioat_sed_raw_descriptor {
+ uint64_t a[8];
+ uint64_t b[8];
+ uint64_t c[8];
+};
+
#endif
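
The layout assumptions behind these structures (each hardware descriptor block is 64 bytes and the super extended descriptor is three such blocks) can be stated as compile-time checks. A hedged sketch; this series does not add such assertions itself:

	/* sketch: size assumptions behind SED_SIZE and the SED layouts */
	static inline void ioat_check_sed_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct ioat_pq16a_descriptor) != SED_SIZE);
		BUILD_BUG_ON(sizeof(struct ioat_pq16b_descriptor) != SED_SIZE);
		BUILD_BUG_ON(sizeof(struct ioat_sed_raw_descriptor) != 3 * SED_SIZE);
	}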
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 71c7ecd80fac..2c8d560e6334 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -94,6 +94,23 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+ /* I/OAT v3.3 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
@@ -190,6 +207,9 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
+ if (device->version >= IOAT_VER_3_0)
+ ioat3_dma_remove(device);
+
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 1391798542b6..2f1cfa0f1f47 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -79,6 +79,8 @@
#define IOAT_CAP_APIC 0x00000080
#define IOAT_CAP_XOR 0x00000100
#define IOAT_CAP_PQ 0x00000200
+#define IOAT_CAP_DWBES 0x00002000
+#define IOAT_CAP_RAID16SS 0x00020000
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
@@ -93,6 +95,8 @@
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_REARM 0x0001
#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
+ IOAT_CHANCTRL_ERR_INT_EN |\
+ IOAT_CHANCTRL_ERR_COMPLETION_EN |\
IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
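
As the dma_v3.c hunks above show, the two new capability bits gate the new features at probe time: IOAT_CAP_DWBES enables per-descriptor write back error status and IOAT_CAP_RAID16SS enables the 16-source operations and SED pools. A condensed sketch of that gating, following the probe logic above (enable_dwbes_handling() is a hypothetical placeholder):

	/* sketch: feature gating off the DMA capability register */
	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	if (device->cap & IOAT_CAP_RAID16SS)
		dma_set_maxpq(&device->common, 16, 0);	/* super extended descriptors */
	else
		dma_set_maxpq(&device->common, 8, 0);

	if (device->cap & IOAT_CAP_DWBES)
		enable_dwbes_handling(device);		/* hypothetical */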