author    Linus Torvalds <torvalds@linux-foundation.org>  2012-04-10 15:30:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-04-10 15:30:16 -0700
commit    94fb175c0414902ad9dbd956addf3a5feafbc85b
tree      5d3c37abe78f072e92072f2079a98303c92cf16e  /drivers/dma
parent    a9e1e53bcfb29b3b503a5e75ce498d9a64f32c1e
parent    a2bd1140a264b561e38d99e656cd843c2d840e86
Merge tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine fixes from Dan Williams:

 1/ regression fix for Xen as it now trips over a broken assumption
    about the dma address size on 32-bit builds

 2/ new quirk for netdma to ignore dma channels that cannot meet
    netdma alignment requirements

 3/ fixes for two long standing issues in ioatdma (ring size overflow)
    and iop-adma (potential stack corruption)

* tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  netdma: adding alignment check for NETDMA ops
  ioatdma: DMA copy alignment needed to address IOAT DMA silicon errata
  ioat: ring size variables need to be 32bit to avoid overflow
  iop-adma: Corrected array overflow in RAID6 Xscale(R) test.
  ioat: fix size of 'completion' for Xen
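[Editorial note] A minimal userspace sketch (not part of the patch) of the failure mode behind item 1/: on a 32-bit build where dma_addr_t is 64 bits wide (as under Xen/PAE), storing a completion address in unsigned long silently drops the high bits. The typedef below is an assumption standing in for such a configuration.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t; /* assumption: 64-bit DMA addresses on a 32-bit build */

    int main(void)
    {
        dma_addr_t phys_complete = 0x1ffff0000ULL; /* completion address above 4GB */
        unsigned long truncated = (unsigned long)phys_complete;

        /* On an ILP32 target unsigned long is 32 bits, so the high
         * word of the address is silently dropped. */
        printf("dma_addr_t: %llx  unsigned long: %lx\n",
               (unsigned long long)phys_complete, truncated);
        return 0;
    }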
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmaengine.c   | 14
-rw-r--r--  drivers/dma/ioat/dma.c    | 16
-rw-r--r--  drivers/dma/ioat/dma.h    |  6
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 12
-rw-r--r--  drivers/dma/ioat/dma_v2.h |  4
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 49
-rw-r--r--  drivers/dma/iop-adma.c    |  4

7 files changed, 80 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc31b365..2397f6f451b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
}
EXPORT_SYMBOL(dma_find_channel);
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+ struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+ if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+ return NULL;
+
+ return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
/**
* dma_issue_pending_all - flush all pending operations across all channels
*/
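[Editorial note] A hedged sketch of what the is_dma_copy_aligned(chan->device, 1, 1, 1) probe above amounts to, assuming the dmaengine convention that copy_align is a power-of-two exponent: offsets and length of 1 pass only for channels with no alignment requirement, which is what filters out the quirked ioat channels later in this series. The helper name is hypothetical; it only mirrors the shape of the kernel check.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Sketch of the dmaengine-style alignment test: align is a
     * log2 exponent, so the mask is (1 << align) - 1. */
    static bool copy_aligned(unsigned int align, size_t off1, size_t off2, size_t len)
    {
        return !((off1 | off2 | len) & ((1u << align) - 1));
    }

    int main(void)
    {
        /* Probing with 1/1/1 succeeds only when no alignment is required. */
        printf("copy_align=0: %d\n", copy_aligned(0, 1, 1, 1)); /* 1: usable for net_dma */
        printf("copy_align=6: %d\n", copy_aligned(6, 1, 1, 1)); /* 0: rejected */
        return 0;
    }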
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d80e0e9..73b2b65cb1de 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
PCI_DMA_TODEVICE, flags, 0);
}
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 completion;
completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete)
+ dma_addr_t *phys_complete)
{
*phys_complete = ioat_get_current_completion(chan);
if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
return true;
}
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct list_head *_desc, *n;
struct dma_async_tx_descriptor *tx;
- dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
- __func__, phys_complete);
+ dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+ __func__, (unsigned long long) phys_complete);
list_for_each_safe(_desc, n, &ioat->used_desc) {
struct ioat_desc_sw *desc;
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
prefetch(chan->completion);
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&ioat->desc_lock);
} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&ioat->desc_lock);
/* if we haven't made progress and we have already
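[Editorial note] The dev_dbg change above uses the standard idiom for printing a dma_addr_t, whose width varies by configuration: widen explicitly to unsigned long long and print with %llx. A small sketch, with the typedef again assumed for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t; /* assumption: a wide-address configuration */

    int main(void)
    {
        dma_addr_t phys_complete = 0x1ffff0000ULL;

        /* %lx would be wrong wherever dma_addr_t is not unsigned long;
         * the explicit cast plus %llx is correct at every width. */
        printf("phys_complete: %llx\n", (unsigned long long)phys_complete);
        return 0;
    }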
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bccd974..5e8fe01ba69d 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
struct ioat_chan_common {
struct dma_chan common;
void __iomem *reg_base;
- unsigned long last_completion;
+ dma_addr_t last_completion;
spinlock_t cleanup_lock;
unsigned long state;
#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete);
+ dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110ff3d96..86895760b598 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&ioat->prep_lock);
}
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
- const u16 curr_size = ioat2_ring_size(ioat);
+ const u32 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
- const u16 new_size = 1 << order;
+ const u32 new_size = 1 << order;
struct ioat_ring_ent **ring;
u16 i;
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..be2a55b95c23 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
return container_of(chan, struct ioat2_dma_chan, base);
}
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
{
return 1 << ioat->alloc_order;
}
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
}
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
{
return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
}
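[Editorial note] The u16 -> u32 changes above matter because the ring can grow to 2^16 entries (assuming a maximum alloc_order of 16, per the commit title "ring size variables need to be 32bit to avoid overflow"): 1 << 16 does not fit in a u16 and wraps to 0, collapsing the ring-space accounting. A minimal demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int order = 16;                 /* assumption: maximum ring order of 16 */
        uint16_t old_size = 1 << order; /* wraps to 0 */
        uint32_t new_size = 1 << order; /* 65536, as intended */

        printf("u16 size: %u  u32 size: %u\n", old_size, new_size);
        return 0;
    }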
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
* The difference from the dma_v2.c __cleanup() is that this routine
* handles extended descriptors and dma-unmapping raid operations.
*/
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+ case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+ case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+ return true;
+ default:
+ return false;
+ }
+}
+
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
+ if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+ dma->copy_align = 6;
+
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
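[Editorial note] copy_align is a log2 exponent, so the quirk above demands 64-byte (1 << 6) alignment of source, destination, and length on the affected JF/SNB parts. Combined with the net_dma_find_channel() probe earlier in this series, these channels drop out of netdma use while remaining available for aligned bulk copies. A sketch of that effect, under the same power-of-two convention assumed before:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static bool copy_aligned(unsigned int align, size_t src, size_t dst, size_t len)
    {
        return !((src | dst | len) & ((1u << align) - 1));
    }

    int main(void)
    {
        /* With copy_align = 6, a 64-byte aligned bulk copy still passes... */
        printf("aligned 4KB copy: %d\n", copy_aligned(6, 0, 0, 4096));
        /* ...but the byte-granular probe used by net_dma does not. */
        printf("1-byte probe:     %d\n", copy_aligned(6, 1, 1, 1));
        return 0;
    }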
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2c066a..79e3eba29702 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
/* address conversion buffers (dma_map / page_address) */
void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
- dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
- dma_addr_t pq_dest[2];
+ dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+ dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
int i;
struct dma_async_tx_descriptor *tx;
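[Editorial note] The iop-adma change closes a stack overrun: the self-test maps IOP_ADMA_NUM_SRC_TEST source pages plus the P and Q destinations through one index, so the old IOP_ADMA_NUM_SRC_TEST-sized pq_src array was written two slots past its end. A sketch of the fixed layout; the value of IOP_ADMA_NUM_SRC_TEST and the 32-bit dma_addr_t are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define IOP_ADMA_NUM_SRC_TEST 4 /* assumption: small source count for the self-test */

    typedef uint32_t dma_addr_t;    /* assumption: 32-bit iop platform */

    int main(void)
    {
        /* One array sized for the sources plus P and Q; pq_dest aliases
         * the last two slots, so a single mapping loop stays in bounds. */
        dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST + 2];
        dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
        int i;

        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
            pq_src[i] = 0x1000 * i; /* stand-in for dma_map_page() results */

        printf("P: %#x  Q: %#x\n", (unsigned)pq_dest[0], (unsigned)pq_dest[1]);
        return 0;
    }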