author     FUJITA Tomonori <tomof@acm.org>  2008-02-04 22:28:10 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 09:44:11 -0800
commit     1b39b077789955c8389488d53d075518fdcd582e (patch)
tree       b633d041bdc32959c118ec9ef77ee17bbd0b5b23
parent     383af9525bb27f927511874f6306247ec13f1c28 (diff)
iommu sg: x86: convert calgary IOMMU to use the IOMMU helper
This patch converts calgary IOMMU to use the IOMMU helper functions. The
IOMMU doesn't allocate a memory area spanning LLD's segment boundary
anymore.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
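The core of the change is computing boundary_size from dma_get_seg_boundary() and switching from find_next_zero_string() to iommu_area_alloc(), which refuses to hand out a range that straddles a segment boundary. The following user-space sketch is an illustration only, not kernel code; the 4 KiB page size and the 64 KiB boundary mask (a value dma_get_seg_boundary() might return) are assumptions. It reproduces the bytes-to-pages conversion the patch performs and the no-crossing condition the helper enforces:

/*
 * Minimal user-space sketch of the segment-boundary rule; not the
 * kernel code itself.  PAGE_SHIFT = 12 and the 64 KiB boundary mask
 * below are assumptions chosen for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Would npages pages starting at offset cross a boundary_size segment? */
static int crosses_boundary(unsigned long offset, unsigned long npages,
			    unsigned long boundary_size)
{
	return (offset / boundary_size) !=
	       ((offset + npages - 1) / boundary_size);
}

int main(void)
{
	/* stand-in for dma_get_seg_boundary(dev) returning 64 KiB - 1 */
	unsigned long seg_boundary_mask = 0xffffUL;
	unsigned long boundary_size;

	/* same bytes-to-pages conversion the patch performs */
	boundary_size = ALIGN(seg_boundary_mask + 1, PAGE_SIZE) >> PAGE_SHIFT;

	printf("boundary_size = %lu pages\n", boundary_size);	/* 16 */
	printf("offset 0,  8 pages -> crosses: %d\n",
	       crosses_boundary(0, 8, boundary_size));		/* 0 */
	printf("offset 12, 8 pages -> crosses: %d\n",
	       crosses_boundary(12, 8, boundary_size));		/* 1 */
	return 0;
}

With those assumed values boundary_size comes out to 16 pages, so an 8-page allocation starting at page 12 would cross a boundary and must be placed elsewhere; this is the case the old find_next_zero_string() allocator could not express.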
-rw-r--r--  arch/x86/Kconfig                  |  3
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c  | 34
2 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 59eef1c7fdaa..c976eb41c5c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -465,6 +465,9 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
Calgary anyway, pass 'iommu=calgary' on the kernel command line.
If unsure, say Y.
+config IOMMU_HELPER
+ def_bool CALGARY_IOMMU
+
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 1fe7f043ebde..1b5464c2434f 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/tce.h>
@@ -260,22 +261,28 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
- unsigned int npages)
+static unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu_table *tbl,
+ unsigned int npages)
{
unsigned long flags;
unsigned long offset;
+ unsigned long boundary_size;
+
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
BUG_ON(npages == 0);
spin_lock_irqsave(&tbl->it_lock, flags);
- offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
- tbl->it_size, npages);
+ offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
+ npages, 0, boundary_size, 0);
if (offset == ~0UL) {
tbl->chip_ops->tce_cache_blast(tbl);
- offset = find_next_zero_string(tbl->it_map, 0,
- tbl->it_size, npages);
+
+ offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
+ npages, 0, boundary_size, 0);
if (offset == ~0UL) {
printk(KERN_WARNING "Calgary: IOMMU full.\n");
spin_unlock_irqrestore(&tbl->it_lock, flags);
@@ -286,7 +293,6 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
}
}
- set_bit_string(tbl->it_map, offset, npages);
tbl->it_hint = offset + npages;
BUG_ON(tbl->it_hint > tbl->it_size);
@@ -295,13 +301,13 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
return offset;
}
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
- unsigned int npages, int direction)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
dma_addr_t ret = bad_dma_address;
- entry = iommu_range_alloc(tbl, npages);
+ entry = iommu_range_alloc(dev, tbl, npages);
if (unlikely(entry == bad_dma_address))
goto error;
@@ -354,7 +360,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
badbit, tbl, dma_addr, entry, npages);
}
- __clear_bit_string(tbl->it_map, entry, npages);
+ iommu_area_free(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -438,7 +444,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
vaddr = (unsigned long) sg_virt(s);
npages = num_dma_pages(vaddr, s->length);
- entry = iommu_range_alloc(tbl, npages);
+ entry = iommu_range_alloc(dev, tbl, npages);
if (entry == bad_dma_address) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
@@ -476,7 +482,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
npages = num_dma_pages(uaddr, size);
if (translation_enabled(tbl))
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+ dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
else
dma_handle = virt_to_bus(vaddr);
@@ -516,7 +522,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
if (translation_enabled(tbl)) {
/* set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+ mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
if (mapping == bad_dma_address)
goto free;