path: root/arch/powerpc/mm
author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2006-11-11 17:25:10 +1100
committer  Paul Mackerras <paulus@samba.org>                  2006-12-04 20:38:52 +1100
commit     4cb3cee03d558fd457cb58f56c80a2a09a66110c (patch)
tree       fe903107d098871a7babc1e3432448758c542cde /arch/powerpc/mm
parent     d03f387eb321189bc2ba278b6ca82f1a45cf19d6 (diff)
[POWERPC] Allow hooking of PCI MMIO & PIO accessors on 64 bits
This patch reworks the way iSeries hooks on PCI IO operations (both MMIO and
PIO) and provides a generic way for other platforms to do so (we need to do
that for various other platforms).

While reworking the IO ops, I ended up doing some spring cleaning in io.h and
eeh.h which I might want to split into 2 or 3 patches (among others, eeh.h had
a lot of useless stuff in it).

A side effect is that EEH for PIO should work now (it used to pass IO ports
down to the eeh address check functions, which is bogus).

Also new are MMIO "repeat" ops, which other archs like ARM already had, and
that we have too now: readsb, readsw, readsl, writesb, writesw, writesl.

In the long run, I might also make EEH use the hooks instead of wrapping at
the toplevel, which would make things even cleaner and relegate EEH completely
to platforms/iseries, but we have to measure the performance impact there
(though it's really only on MMIO reads).

Since I also need to hook on ioremap, I shuffled the functions a bit there. I
introduced ioremap_flags() to be used by drivers who want to pass explicit
flags to ioremap (and it can be hooked). The old __ioremap() is still there as
a low-level function and cannot be hooked, so drivers who use it should
migrate unless they know they want the low-level version.

The patch "arch provides generic iomap missing accessors" (should be number 4
in this series) is a prerequisite for providing full iomap API support with
this patch.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
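For illustration only, here is a minimal sketch of how a platform could install
the new hooks. It is not part of the patch: the hook signatures are inferred
from the ppc_md.ioremap()/ppc_md.iounmap() call sites in the diff below, and
the example_* names are hypothetical.

/*
 * Sketch: platform-side installation of the ioremap/iounmap hooks.
 * Signatures inferred from the generic ioremap()/iounmap() wrappers
 * added by this patch; names are hypothetical.
 */
#include <linux/init.h>
#include <asm/machdep.h>
#include <asm/io.h>

static void __iomem *example_ioremap(unsigned long addr, unsigned long size,
				     unsigned long flags)
{
	/* Platform-specific translation or bookkeeping would go here,
	 * then fall back to the unhooked low-level mapping. */
	return __ioremap(addr, size, flags);
}

static void example_iounmap(void __iomem *token)
{
	__iounmap(token);
}

static void __init example_setup(void)
{
	/* Once set, the generic ioremap()/ioremap_flags()/iounmap()
	 * wrappers call these hooks instead of __ioremap()/__iounmap(). */
	ppc_md.ioremap = example_ioremap;
	ppc_md.iounmap = example_iounmap;
}

Note that a driver wanting explicit page attributes would call
ioremap_flags(addr, size, flags), which still goes through the hook; only the
low-level __ioremap() bypasses it.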
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/pgtable_64.c  46
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index ac64f4aaa509..e9b21846ccbd 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
- return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
void __iomem * __ioremap(unsigned long addr, unsigned long size,
unsigned long flags)
{
unsigned long pa, ea;
void __iomem *ret;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return (void __iomem *)addr;
-
/*
* Choose an address to map it to.
* Once the imalloc system is running, we use it.
@@ -178,6 +168,25 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
return ret;
}
+
+void __iomem * ioremap(unsigned long addr, unsigned long size)
+{
+ unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(unsigned long addr, unsigned long size,
+ unsigned long flags)
+{
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, flags);
+ return __ioremap(addr, size, flags);
+}
+
+
#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
int __ioremap_explicit(unsigned long pa, unsigned long ea,
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
*
* XXX what about calls before mem_init_done (ie python_countermeasures())
*/
-void iounmap(volatile void __iomem *token)
+void __iounmap(void __iomem *token)
{
void *addr;
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- return;
-
if (!mem_init_done)
return;
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
im_free(addr);
}
+void iounmap(void __iomem *token)
+{
+ if (ppc_md.iounmap)
+ ppc_md.iounmap(token);
+ else
+ __iounmap(token);
+}
+
static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
return 0;
}
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(void __iomem *start, unsigned long size)
{
struct vm_struct *area;
unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
}
EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
void __iomem * reserve_phb_iospace(unsigned long size)
{