| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 17:25:00 -0800 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 17:25:00 -0800 |
| commit | a0e4467726cd26bacb16f13d207ffcfa82ffc07d (patch) | |
| tree | 98b5fcbda0cd787b07d09da90d25c87b3883c567 | |
| parent | ed8efd2de75479a175bd21df073d9e97df65a820 (diff) | |
| parent | cb61f6769b8836081940ba26249f1b756400c7df (diff) | |
Merge tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic asm/io.h rewrite from Arnd Bergmann:
"While there normally is no reason to have a pull request for
asm-generic but have all changes get merged through whichever tree
needs them, I do have a series for 3.19.
There are two sets of patches that change significant portions of
asm/io.h, and this branch contains both in order to resolve the
conflicts:
- Will Deacon has done a set of patches to ensure that all
architectures define {read,write}{b,w,l,q}_relaxed() functions or
get them by including asm-generic/io.h.
These functions are commonly used in ARM-specific drivers to avoid
the expensive L2 cache synchronization implied by the normal
{read,write}{b,w,l,q} accessors, but we need to define them on all
architectures in order to share those drivers across architectures
and to enable CONFIG_COMPILE_TEST builds of them
- Thierry Reding has done an unrelated set of patches that extends
the asm-generic/io.h file to the degree necessary to make it useful
on ARM64 and potentially other architectures"
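The practical payoff of the relaxed accessors is easiest to see in driver code. Below is a minimal, hypothetical polling loop (device, register names and offsets are invented for illustration, not taken from this series): the `_relaxed()` variants skip the heavyweight barriers while repeatedly touching the same peripheral, and a final non-relaxed `readl()` restores full ordering before any dependent access to normal memory such as a DMA buffer. With this merge, such code builds on every architecture rather than only on ARM.

```c
#include <linux/io.h>
#include <linux/kernel.h>
/* other includes omitted for brevity */

/* Hypothetical register layout, for illustration only. */
#define FOO_STATUS		0x00
#define FOO_STATUS_RDY		0x01
#define FOO_DATA		0x04

static void foo_drain_fifo(void __iomem *base, u32 *buf, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		/*
		 * Relaxed reads: ordered only against other accesses to
		 * the same peripheral, so no costly barrier per iteration.
		 */
		while (!(readl_relaxed(base + FOO_STATUS) & FOO_STATUS_RDY))
			cpu_relax();
		buf[i] = readl_relaxed(base + FOO_DATA);
	}

	/*
	 * One fully ordered read before the caller inspects normal memory
	 * that the device may also touch (e.g. a DMA descriptor ring).
	 */
	(void)readl(base + FOO_STATUS);
}
```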
* tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (29 commits)
ARM64: use GENERIC_PCI_IOMAP
sparc: io: remove duplicate relaxed accessors on sparc32
ARM: sa11x0: Use void __iomem * in MMIO accessors
arm64: Use include/asm-generic/io.h
ARM: Use include/asm-generic/io.h
asm-generic/io.h: Implement generic {read,write}s*()
asm-generic/io.h: Reconcile I/O accessor overrides
/dev/mem: Use more consistent data types
Change xlate_dev_{kmem,mem}_ptr() prototypes
ARM: ixp4xx: Properly override I/O accessors
ARM: ixp4xx: Fix build with IXP4XX_INDIRECT_PCI
ARM: ebsa110: Properly override I/O accessors
ARC: Remove redundant PCI_IOBASE declaration
documentation: memory-barriers: clarify relaxed io accessor semantics
x86: io: implement dummy relaxed accessor macros for writes
tile: io: implement dummy relaxed accessor macros for writes
sparc: io: implement dummy relaxed accessor macros for writes
powerpc: io: implement dummy relaxed accessor macros for writes
parisc: io: implement dummy relaxed accessor macros for writes
mn10300: io: implement dummy relaxed accessor macros for writes
...
33 files changed, 835 insertions, 368 deletions
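One idiom recurs throughout the hunks below and is worth calling out before reading them: the rewritten asm-generic/io.h wraps every accessor in an `#ifndef` guard, and an architecture claims an override by defining the symbol to itself right next to its own implementation, then including the generic header to fill in whatever it did not provide. A condensed sketch of the convention (the names are real, but this fragment is assembled for illustration rather than copied from any single file in the diff):

```c
#include <linux/types.h>

/* arch/foo/include/asm/io.h (hypothetical architecture) */
#define __raw_readl __raw_readl	/* tell asm-generic/io.h we provide this */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;	/* arch-specific access here */
}

#include <asm-generic/io.h>	/* generic layer supplies everything else */

/*
 * In asm-generic/io.h each fallback is guarded, so the override above wins:
 *
 *	#ifndef __raw_readl
 *	#define __raw_readl __raw_readl
 *	static inline u32 __raw_readl(const volatile void __iomem *addr)
 *	{
 *		return *(const volatile u32 __force *)addr;
 *	}
 *	#endif
 */
```

The same convention explains the seemingly redundant `#define readsw readsw`, `#define inb inb` and similar one-liners scattered through the ARM, arm64, ebsa110 and ixp4xx hunks.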
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 22a969cdd476..4af4cea8cff0 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -2465,10 +2465,15 @@ functions: Please refer to the PCI specification for more information on interactions between PCI transactions. - (*) readX_relaxed() - - These are similar to readX(), but are not guaranteed to be ordered in any - way. Be aware that there is no I/O read barrier available. + (*) readX_relaxed(), writeX_relaxed() + + These are similar to readX() and writeX(), but provide weaker memory + ordering guarantees. Specifically, they do not guarantee ordering with + respect to normal memory accesses (e.g. DMA buffers) nor do they guarantee + ordering with respect to LOCK or UNLOCK operations. If the latter is + required, an mmiowb() barrier can be used. Note that relaxed accesses to + the same peripheral are guaranteed to be ordered with respect to each + other. (*) ioreadX(), iowriteX() diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 334ce7017a18..cabd518cb253 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -13,8 +13,6 @@ #include <asm/byteorder.h> #include <asm/page.h> -#define PCI_IOBASE ((void __iomem *)0) - extern void __iomem *ioremap(unsigned long physaddr, unsigned long size); extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags); diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 180567408ee8..db58deb00aa7 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -47,13 +47,13 @@ extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set); * Generic IO read/write. These perform native-endian accesses. Note * that some architectures will want to re-define __raw_{read,write}w. */ -extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen); -extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen); -extern void __raw_writesl(void __iomem *addr, const void *data, int longlen); +void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen); +void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen); +void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen); -extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); -extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); -extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); +void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen); +void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen); +void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen); #if __LINUX_ARM_ARCH__ < 6 /* @@ -69,6 +69,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); * writeback addressing modes as these incur a significant performance * overhead (the address generation must be emulated in software). 
*/ +#define __raw_writew __raw_writew static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %1, %0" @@ -76,6 +77,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr) : "r" (val)); } +#define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; @@ -86,6 +88,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) } #endif +#define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %1, %0" @@ -93,6 +96,7 @@ static inline void __raw_writeb(u8 val, volatile void __iomem *addr) : "r" (val)); } +#define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %1, %0" @@ -100,6 +104,7 @@ static inline void __raw_writel(u32 val, volatile void __iomem *addr) : "r" (val)); } +#define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; @@ -109,6 +114,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) return val; } +#define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; @@ -267,20 +273,6 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); #define insl(p,d,l) __raw_readsl(__io(p),d,l) #endif -#define outb_p(val,port) outb((val),(port)) -#define outw_p(val,port) outw((val),(port)) -#define outl_p(val,port) outl((val),(port)) -#define inb_p(port) inb((port)) -#define inw_p(port) inw((port)) -#define inl_p(port) inl((port)) - -#define outsb_p(port,from,len) outsb(port,from,len) -#define outsw_p(port,from,len) outsw(port,from,len) -#define outsl_p(port,from,len) outsl(port,from,len) -#define insb_p(port,to,len) insb(port,to,len) -#define insw_p(port,to,len) insw(port,to,len) -#define insl_p(port,to,len) insl(port,to,len) - /* * String version of IO memory access ops: */ @@ -347,40 +339,42 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define iounmap __arm_iounmap /* - * io{read,write}{8,16,32} macros + * io{read,write}{16,32}be() macros */ -#ifndef ioread8 -#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; }) -#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) -#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) - -#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) -#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) - -#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); }) -#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); }) -#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); }) +#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) -#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) - -#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) -#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) -#define ioread32_rep(p,d,c) __raw_readsl(p,d,c) - -#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) -#define iowrite16_rep(p,s,c) 
__raw_writesw(p,s,c) -#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) +#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) +#ifndef ioport_map +#define ioport_map ioport_map extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +#endif +#ifndef ioport_unmap +#define ioport_unmap ioport_unmap extern void ioport_unmap(void __iomem *addr); #endif struct pci_dev; +#define pci_iounmap pci_iounmap extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); /* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#include <asm-generic/io.h> + +/* * can the hardware map this into one segment or not, given no other * constraints. */ @@ -402,17 +396,6 @@ extern int devmem_is_allowed(unsigned long pfn); #endif /* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -/* * Register ISA memory and port locations for glibc iopl/inb/outb * emulation. */ diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index e731018869a7..184def0e1652 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -274,11 +274,13 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) * translation for translating DMA addresses. Use the driver * DMA support - see dma-mapping.h. */ +#define virt_to_phys virt_to_phys static inline phys_addr_t virt_to_phys(const volatile void *x) { return __virt_to_phys((unsigned long)(x)); } +#define phys_to_virt phys_to_virt static inline void *phys_to_virt(phys_addr_t x) { return (void *)__phys_to_virt(x); @@ -322,11 +324,13 @@ static inline phys_addr_t __virt_to_idmap(unsigned long x) #endif #ifdef CONFIG_VIRT_TO_BUS +#define virt_to_bus virt_to_bus static inline __deprecated unsigned long virt_to_bus(void *x) { return __virt_to_bus((unsigned long)x); } +#define bus_to_virt bus_to_virt static inline __deprecated void *bus_to_virt(unsigned long x) { return (void *)__bus_to_virt(x); diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h index 11bb0799424b..69975784acfa 100644 --- a/arch/arm/mach-ebsa110/include/mach/io.h +++ b/arch/arm/mach-ebsa110/include/mach/io.h @@ -29,9 +29,9 @@ u8 __readb(const volatile void __iomem *addr); u16 __readw(const volatile void __iomem *addr); u32 __readl(const volatile void __iomem *addr); -void __writeb(u8 val, void __iomem *addr); -void __writew(u16 val, void __iomem *addr); -void __writel(u32 val, void __iomem *addr); +void __writeb(u8 val, volatile void __iomem *addr); +void __writew(u16 val, volatile void __iomem *addr); +void __writel(u32 val, volatile void __iomem *addr); /* * Argh, someone forgot the IOCS16 line. 
We therefore have to handle @@ -62,20 +62,31 @@ void __writel(u32 val, void __iomem *addr); #define writew(v,b) __writew(v,b) #define writel(v,b) __writel(v,b) +#define insb insb extern void insb(unsigned int port, void *buf, int sz); +#define insw insw extern void insw(unsigned int port, void *buf, int sz); +#define insl insl extern void insl(unsigned int port, void *buf, int sz); +#define outsb outsb extern void outsb(unsigned int port, const void *buf, int sz); +#define outsw outsw extern void outsw(unsigned int port, const void *buf, int sz); +#define outsl outsl extern void outsl(unsigned int port, const void *buf, int sz); /* can't support writesb atm */ -extern void writesw(void __iomem *addr, const void *data, int wordlen); -extern void writesl(void __iomem *addr, const void *data, int longlen); +#define writesw writesw +extern void writesw(volatile void __iomem *addr, const void *data, int wordlen); +#define writesl writesl +extern void writesl(volatile void __iomem *addr, const void *data, int longlen); /* can't support readsb atm */ -extern void readsw(const void __iomem *addr, void *data, int wordlen); -extern void readsl(const void __iomem *addr, void *data, int longlen); +#define readsw readsw +extern void readsw(const volatile void __iomem *addr, void *data, int wordlen); + +#define readsl readsl +extern void readsl(const volatile void __iomem *addr, void *data, int longlen); #endif diff --git a/arch/arm/mach-ebsa110/io.c b/arch/arm/mach-ebsa110/io.c index 756cc377a73d..b57980b435fd 100644 --- a/arch/arm/mach-ebsa110/io.c +++ b/arch/arm/mach-ebsa110/io.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL(__readb); EXPORT_SYMBOL(__readw); EXPORT_SYMBOL(__readl); -void readsw(const void __iomem *addr, void *data, int len) +void readsw(const volatile void __iomem *addr, void *data, int len) { void __iomem *a = __isamem_convert_addr(addr); @@ -112,7 +112,7 @@ void readsw(const void __iomem *addr, void *data, int len) } EXPORT_SYMBOL(readsw); -void readsl(const void __iomem *addr, void *data, int len) +void readsl(const volatile void __iomem *addr, void *data, int len) { void __iomem *a = __isamem_convert_addr(addr); @@ -122,7 +122,7 @@ void readsl(const void __iomem *addr, void *data, int len) } EXPORT_SYMBOL(readsl); -void __writeb(u8 val, void __iomem *addr) +void __writeb(u8 val, volatile void __iomem *addr) { void __iomem *a = __isamem_convert_addr(addr); @@ -132,7 +132,7 @@ void __writeb(u8 val, void __iomem *addr) __raw_writeb(val, a); } -void __writew(u16 val, void __iomem *addr) +void __writew(u16 val, volatile void __iomem *addr) { void __iomem *a = __isamem_convert_addr(addr); @@ -142,7 +142,7 @@ void __writew(u16 val, void __iomem *addr) __raw_writew(val, a); } -void __writel(u32 val, void __iomem *addr) +void __writel(u32 val, volatile void __iomem *addr) { void __iomem *a = __isamem_convert_addr(addr); @@ -157,7 +157,7 @@ EXPORT_SYMBOL(__writeb); EXPORT_SYMBOL(__writew); EXPORT_SYMBOL(__writel); -void writesw(void __iomem *addr, const void *data, int len) +void writesw(volatile void __iomem *addr, const void *data, int len) { void __iomem *a = __isamem_convert_addr(addr); @@ -167,7 +167,7 @@ void writesw(void __iomem *addr, const void *data, int len) } EXPORT_SYMBOL(writesw); -void writesl(void __iomem *addr, const void *data, int len) +void writesl(volatile void __iomem *addr, const void *data, int len) { void __iomem *a = __isamem_convert_addr(addr); diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index fc4b7b24265e..8537d4c41e34 100644 --- 
a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -652,7 +652,7 @@ static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size, return (void __iomem *)addr; } -static void ixp4xx_iounmap(void __iomem *addr) +static void ixp4xx_iounmap(volatile void __iomem *addr) { if (!is_pci_memory((__force u32)addr)) __iounmap(addr); diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h index 7d11979da030..6a722860e34d 100644 --- a/arch/arm/mach-ixp4xx/include/mach/io.h +++ b/arch/arm/mach-ixp4xx/include/mach/io.h @@ -58,6 +58,10 @@ static inline int is_pci_memory(u32 addr) #define writew(v, p) __indirect_writew(v, p) #define writel(v, p) __indirect_writel(v, p) +#define writeb_relaxed(v, p) __indirect_writeb(v, p) +#define writew_relaxed(v, p) __indirect_writew(v, p) +#define writel_relaxed(v, p) __indirect_writel(v, p) + #define writesb(p, v, l) __indirect_writesb(p, v, l) #define writesw(p, v, l) __indirect_writesw(p, v, l) #define writesl(p, v, l) __indirect_writesl(p, v, l) @@ -66,6 +70,10 @@ static inline int is_pci_memory(u32 addr) #define readw(p) __indirect_readw(p) #define readl(p) __indirect_readl(p) +#define readb_relaxed(p) __indirect_readb(p) +#define readw_relaxed(p) __indirect_readw(p) +#define readl_relaxed(p) __indirect_readl(p) + #define readsb(p, v, l) __indirect_readsb(p, v, l) #define readsw(p, v, l) __indirect_readsw(p, v, l) #define readsl(p, v, l) __indirect_readsl(p, v, l) @@ -99,7 +107,7 @@ static inline void __indirect_writew(u16 value, volatile void __iomem *p) u32 n, byte_enables, data; if (!is_pci_memory(addr)) { - __raw_writew(value, addr); + __raw_writew(value, p); return; } @@ -164,7 +172,7 @@ static inline unsigned short __indirect_readw(const volatile void __iomem *p) u32 n, byte_enables, data; if (!is_pci_memory(addr)) - return __raw_readw(addr); + return __raw_readw(p); n = addr % 4; byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL; @@ -226,6 +234,7 @@ static inline void __indirect_readsl(const volatile void __iomem *bus_addr, * I/O functions. 
*/ +#define outb outb static inline void outb(u8 value, u32 addr) { u32 n, byte_enables, data; @@ -235,12 +244,14 @@ static inline void outb(u8 value, u32 addr) ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data); } +#define outsb outsb static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count) { while (count--) outb(*vaddr++, io_addr); } +#define outw outw static inline void outw(u16 value, u32 addr) { u32 n, byte_enables, data; @@ -250,23 +261,27 @@ static inline void outw(u16 value, u32 addr) ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data); } +#define outsw outsw static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count) { while (count--) outw(cpu_to_le16(*vaddr++), io_addr); } +#define outl outl static inline void outl(u32 value, u32 addr) { ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value); } +#define outsl outsl static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count) { while (count--) outl(cpu_to_le32(*vaddr++), io_addr); } +#define inb inb static inline u8 inb(u32 addr) { u32 n, byte_enables, data; @@ -278,12 +293,14 @@ static inline u8 inb(u32 addr) return data >> (8*n); } +#define insb insb static inline void insb(u32 io_addr, u8 *vaddr, u32 count) { while (count--) *vaddr++ = inb(io_addr); } +#define inw inw static inline u16 inw(u32 addr) { u32 n, byte_enables, data; @@ -295,12 +312,14 @@ static inline u16 inw(u32 addr) return data>>(8*n); } +#define insw insw static inline void insw(u32 io_addr, u16 *vaddr, u32 count) { while (count--) *vaddr++ = le16_to_cpu(inw(io_addr)); } +#define inl inl static inline u32 inl(u32 addr) { u32 data; @@ -310,6 +329,7 @@ static inline u32 inl(u32 addr) return data; } +#define insl insl static inline void insl(u32 io_addr, u32 *vaddr, u32 count) { while (count--) diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c index ff02e2da99f2..b704433c529c 100644 --- a/arch/arm/mach-sa1100/pci-nanoengine.c +++ b/arch/arm/mach-sa1100/pci-nanoengine.c @@ -33,12 +33,12 @@ static DEFINE_SPINLOCK(nano_lock); static int nanoengine_get_pci_address(struct pci_bus *bus, - unsigned int devfn, int where, unsigned long *address) + unsigned int devfn, int where, void __iomem **address) { int ret = PCIBIOS_DEVICE_NOT_FOUND; unsigned int busnr = bus->number; - *address = NANO_PCI_CONFIG_SPACE_VIRT + + *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT + ((bus->number << 16) | (devfn << 8) | (where & ~3)); ret = (busnr > 255 || devfn > 255 || where > 255) ? 
@@ -51,7 +51,7 @@ static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int w int size, u32 *val) { int ret; - unsigned long address; + void __iomem *address; unsigned long flags; u32 v; @@ -85,7 +85,7 @@ static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int int size, u32 val) { int ret; - unsigned long address; + void __iomem *address; unsigned long flags; unsigned shift; u32 v; diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f385273f4b24..6b1ebd964c10 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -24,9 +24,9 @@ config ARM64 select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CPU_AUTOPROBE select GENERIC_EARLY_IOREMAP - select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP select GENERIC_SCHED_CLOCK select GENERIC_SMP_IDLE_THREAD select GENERIC_STRNCPY_FROM_USER diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 75825b63464d..949c406d4df4 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -36,26 +36,31 @@ /* * Generic IO read/write. These perform native-endian accesses. */ +#define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 val, volatile void __iomem *addr) { asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); } +#define __raw_writew __raw_writew static inline void __raw_writew(u16 val, volatile void __iomem *addr) { asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); } +#define __raw_writel __raw_writel static inline void __raw_writel(u32 val, volatile void __iomem *addr) { asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); } +#define __raw_writeq __raw_writeq static inline void __raw_writeq(u64 val, volatile void __iomem *addr) { asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); } +#define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; @@ -66,6 +71,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr) return val; } +#define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; @@ -77,6 +83,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr) return val; } +#define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; @@ -87,6 +94,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) return val; } +#define __raw_readq __raw_readq static inline u64 __raw_readq(const volatile void __iomem *addr) { u64 val; @@ -140,94 +148,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #define IO_SPACE_LIMIT (SZ_32M - 1) #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M)) -static inline u8 inb(unsigned long addr) -{ - return readb(addr + PCI_IOBASE); -} - -static inline u16 inw(unsigned long addr) -{ - return readw(addr + PCI_IOBASE); -} - -static inline u32 inl(unsigned long addr) -{ - return readl(addr + PCI_IOBASE); -} - -static inline void outb(u8 b, unsigned long addr) -{ - writeb(b, addr + PCI_IOBASE); -} - -static inline void outw(u16 b, unsigned long addr) -{ - writew(b, addr + PCI_IOBASE); -} - -static inline void outl(u32 b, unsigned long addr) -{ - writel(b, addr + PCI_IOBASE); -} - -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) - -#define outb_p(x, addr) outb((x), (addr)) -#define outw_p(x, addr) outw((x), (addr)) -#define outl_p(x, addr) outl((x), (addr)) - -static inline void insb(unsigned long addr, void *buffer, 
int count) -{ - u8 *buf = buffer; - while (count--) - *buf++ = __raw_readb(addr + PCI_IOBASE); -} - -static inline void insw(unsigned long addr, void *buffer, int count) -{ - u16 *buf = buffer; - while (count--) - *buf++ = __raw_readw(addr + PCI_IOBASE); -} - -static inline void insl(unsigned long addr, void *buffer, int count) -{ - u32 *buf = buffer; - while (count--) - *buf++ = __raw_readl(addr + PCI_IOBASE); -} - -static inline void outsb(unsigned long addr, const void *buffer, int count) -{ - const u8 *buf = buffer; - while (count--) - __raw_writeb(*buf++, addr + PCI_IOBASE); -} - -static inline void outsw(unsigned long addr, const void *buffer, int count) -{ - const u16 *buf = buffer; - while (count--) - __raw_writew(*buf++, addr + PCI_IOBASE); -} - -static inline void outsl(unsigned long addr, const void *buffer, int count) -{ - const u32 *buf = buffer; - while (count--) - __raw_writel(*buf++, addr + PCI_IOBASE); -} - -#define insb_p(port,to,len) insb(port,to,len) -#define insw_p(port,to,len) insw(port,to,len) -#define insl_p(port,to,len) insl(port,to,len) - -#define outsb_p(port,from,len) outsb(port,from,len) -#define outsw_p(port,from,len) outsw(port,from,len) -#define outsl_p(port,from,len) outsl(port,from,len) - /* * String version of I/O memory access operations. */ @@ -251,18 +171,14 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define iounmap __iounmap -#define ARCH_HAS_IOREMAP_WC -#include <asm-generic/iomap.h> - /* - * More restrictive address range checking than the default implementation - * (PHYS_OFFSET and PHYS_MASK taken into account). + * io{read,write}{16,32}be() macros */ -#define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range(phys_addr_t addr, size_t size); -extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); +#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) -extern int devmem_is_allowed(unsigned long pfn); +#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); }) /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem @@ -275,6 +191,18 @@ extern int devmem_is_allowed(unsigned long pfn); */ #define xlate_dev_kmem_ptr(p) p +#include <asm-generic/io.h> + +/* + * More restrictive address range checking than the default implementation + * (PHYS_OFFSET and PHYS_MASK taken into account). + */ +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range(phys_addr_t addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + +extern int devmem_is_allowed(unsigned long pfn); + struct bio_vec; extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, const struct bio_vec *vec2); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a62cd077457b..6486b2bfd562 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -120,11 +120,13 @@ extern phys_addr_t memstart_addr; * translation for translating DMA addresses. Use the driver * DMA support - see dma-mapping.h. 
*/ +#define virt_to_phys virt_to_phys static inline phys_addr_t virt_to_phys(const volatile void *x) { return __virt_to_phys((unsigned long)(x)); } +#define phys_to_virt phys_to_virt static inline void *phys_to_virt(phys_addr_t x) { return (void *)(__phys_to_virt(x)); diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index e59dba12ce94..752a3f45df60 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h @@ -112,6 +112,9 @@ static inline void writel(unsigned int b, volatile void __iomem *addr) else *(volatile unsigned int __force *) addr = b; } +#define writeb_relaxed(b, addr) writeb(b, addr) +#define writew_relaxed(b, addr) writew(b, addr) +#define writel_relaxed(b, addr) writel(b, addr) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h index 8cb50a2fbcb2..99bb7efaf9b7 100644 --- a/arch/frv/include/asm/io.h +++ b/arch/frv/include/asm/io.h @@ -243,6 +243,9 @@ static inline void writel(uint32_t datum, volatile void __iomem *addr) __flush_PCI_writes(); } +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel /* Values for nocacheflag and cmode */ #define IOMAP_FULL_CACHING 0 diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index bee0acd52f7e..80a7e34be009 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -393,6 +393,10 @@ __writeq (unsigned long val, volatile void __iomem *addr) #define writew(v,a) __writew((v), (a)) #define writel(v,a) __writel((v), (a)) #define writeq(v,a) __writeq((v), (a)) +#define writeb_relaxed(v,a) __writeb((v), (a)) +#define writew_relaxed(v,a) __writew((v), (a)) +#define writel_relaxed(v,a) __writel((v), (a)) +#define writeq_relaxed(v,a) __writeq((v), (a)) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 449c8c0fa2bd..103bedc59644 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -365,15 +365,15 @@ ia64_done_with_exception (struct pt_regs *regs) } #define ARCH_HAS_TRANSLATE_MEM_PTR 1 -static __inline__ char * -xlate_dev_mem_ptr (unsigned long p) +static __inline__ void * +xlate_dev_mem_ptr(phys_addr_t p) { struct page *page; - char * ptr; + void *ptr; page = pfn_to_page(p >> PAGE_SHIFT); if (PageUncached(page)) - ptr = (char *)p + __IA64_UNCACHED_OFFSET; + ptr = (void *)p + __IA64_UNCACHED_OFFSET; else ptr = __va(p); @@ -383,15 +383,15 @@ xlate_dev_mem_ptr (unsigned long p) /* * Convert a virtual cached kernel memory pointer to an uncached pointer */ -static __inline__ char * -xlate_dev_kmem_ptr (char * p) +static __inline__ void * +xlate_dev_kmem_ptr(void *p) { struct page *page; - char * ptr; + void *ptr; page = virt_to_page((unsigned long)p); if (PageUncached(page)) - ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET; + ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET; else ptr = p; diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h index 4010f1fc5b65..6e7787f3dac7 100644 --- a/arch/m32r/include/asm/io.h +++ b/arch/m32r/include/asm/io.h @@ -161,6 +161,9 @@ static inline void _writel(unsigned long l, unsigned long addr) #define __raw_writeb writeb #define __raw_writew writew #define __raw_writel writel +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel #define ioread8 read #define ioread16 readw diff --git a/arch/m68k/include/asm/io.h 
b/arch/m68k/include/asm/io.h index c70cc9155003..bccd5a914eb6 100644 --- a/arch/m68k/include/asm/io.h +++ b/arch/m68k/include/asm/io.h @@ -3,3 +3,11 @@ #else #include <asm/io_mm.h> #endif + +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) + +#define writeb_relaxed(b, addr) writeb(b, addr) +#define writew_relaxed(b, addr) writew(b, addr) +#define writel_relaxed(b, addr) writel(b, addr) diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h index be4b5a813ad4..a93c8cde4d38 100644 --- a/arch/m68k/include/asm/io_no.h +++ b/arch/m68k/include/asm/io_no.h @@ -40,10 +40,6 @@ static inline unsigned int _swapl(volatile unsigned long v) #define readl(addr) \ ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; }) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) - #define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b)) #define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b)) #define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b)) diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 433751b2a003..940f5fc1d1da 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -69,12 +69,4 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size); #include <asm-generic/io.h> -#define readb_relaxed readb -#define readw_relaxed readw -#define readl_relaxed readl - -#define writeb_relaxed writeb -#define writew_relaxed writew -#define writel_relaxed writel - #endif /* _ASM_MICROBLAZE_IO_H */ diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h index e6ed0d897ccc..897ba3c12b32 100644 --- a/arch/mn10300/include/asm/io.h +++ b/arch/mn10300/include/asm/io.h @@ -67,6 +67,10 @@ static inline void writel(u32 b, volatile void __iomem *addr) #define __raw_writew writew #define __raw_writel writel +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel + /*****************************************************************************/ /* * traditional input/output functions diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index 1f6d2ae7aba5..8cd0abf28ffb 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -217,10 +217,14 @@ static inline void writeq(unsigned long long q, volatile void __iomem *addr) #define writel writel #define writeq writeq -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define readq_relaxed(addr) readq(addr) +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) +#define readq_relaxed(addr) readq(addr) +#define writeb_relaxed(b, addr) writeb(b, addr) +#define writew_relaxed(w, addr) writew(w, addr) +#define writel_relaxed(l, addr) writel(l, addr) +#define writeq_relaxed(q, addr) writeq(q, addr) #define mmiowb() do { } while (0) diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 97d3869991ca..9eaf301ac952 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -617,10 +617,14 @@ static inline void name at \ /* * We don't do relaxed operations yet, at least not with this semantic */ -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) 
readl(addr) -#define readq_relaxed(addr) readq(addr) +#define readb_relaxed(addr) readb(addr) +#define readw_relaxed(addr) readw(addr) +#define readl_relaxed(addr) readl(addr) +#define readq_relaxed(addr) readq(addr) +#define writeb_relaxed(v, addr) writeb(v, addr) +#define writew_relaxed(v, addr) writew(v, addr) +#define writel_relaxed(v, addr) writel(v, addr) +#define writeq_relaxed(v, addr) writeq(v, addr) #ifdef CONFIG_PPC32 #define mmiowb() diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index cd6b9ee7b69c..6ad9013c67e7 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -13,9 +13,10 @@ #include <asm/page.h> #include <asm/pci_io.h> -void *xlate_dev_mem_ptr(unsigned long phys); #define xlate_dev_mem_ptr xlate_dev_mem_ptr -void unxlate_dev_mem_ptr(unsigned long phys, void *addr); +void *xlate_dev_mem_ptr(phys_addr_t phys); +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); /* * Convert a virtual cached pointer to an uncached pointer @@ -60,11 +61,6 @@ static inline void iounmap(volatile void __iomem *addr) #define __raw_writel zpci_write_u32 #define __raw_writeq zpci_write_u64 -#define readb_relaxed readb -#define readw_relaxed readw -#define readl_relaxed readl -#define readq_relaxed readq - #endif /* CONFIG_PCI */ #include <asm-generic/io.h> diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index 2a2e35416d2f..2eb34bdfc613 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -176,7 +176,7 @@ static int is_swapped(unsigned long addr) * For swapped prefix pages a new buffer is returned that contains a copy of * the absolute memory. The buffer size is maximum one page large. */ -void *xlate_dev_mem_ptr(unsigned long addr) +void *xlate_dev_mem_ptr(phys_addr_t addr) { void *bounce = (void *) addr; unsigned long size; @@ -197,7 +197,7 @@ void *xlate_dev_mem_ptr(unsigned long addr) /* * Free converted buffer for /dev/mem access (if necessary) */ -void unxlate_dev_mem_ptr(unsigned long addr, void *buf) +void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf) { if ((void *) addr != buf) free_page((unsigned long) buf); diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index 9f532902627c..407ac14295f4 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h @@ -4,10 +4,6 @@ #include <linux/kernel.h> #include <linux/ioport.h> /* struct resource */ -#define readb_relaxed(__addr) readb(__addr) -#define readw_relaxed(__addr) readw(__addr) -#define readl_relaxed(__addr) readl(__addr) - #define IO_SPACE_LIMIT 0xffffffff #define memset_io(d,c,sz) _memset_io(d,c,sz) diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h index 80b54b326d49..9b672be70dda 100644 --- a/arch/sparc/include/asm/io_64.h +++ b/arch/sparc/include/asm/io_64.h @@ -101,6 +101,7 @@ static inline void __raw_writeq(u64 q, const volatile void __iomem *addr) * the cache by using ASI_PHYS_BYPASS_EC_E_L */ #define readb readb +#define readb_relaxed readb static inline u8 readb(const volatile void __iomem *addr) { u8 ret; @@ -112,6 +113,7 @@ static inline u8 readb(const volatile void __iomem *addr) } #define readw readw +#define readw_relaxed readw static inline u16 readw(const volatile void __iomem *addr) { u16 ret; @@ -124,6 +126,7 @@ static inline u16 readw(const volatile void __iomem *addr) } #define readl readl +#define readl_relaxed readl static inline u32 readl(const volatile void __iomem *addr) { u32 ret; @@ -136,6 +139,7 @@ static inline 
u32 readl(const volatile void __iomem *addr) } #define readq readq +#define readq_relaxed readq static inline u64 readq(const volatile void __iomem *addr) { u64 ret; @@ -148,6 +152,7 @@ static inline u64 readq(const volatile void __iomem *addr) } #define writeb writeb +#define writeb_relaxed writeb static inline void writeb(u8 b, volatile void __iomem *addr) { __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */" @@ -157,6 +162,7 @@ static inline void writeb(u8 b, volatile void __iomem *addr) } #define writew writew +#define writew_relaxed writew static inline void writew(u16 w, volatile void __iomem *addr) { __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */" @@ -166,6 +172,7 @@ static inline void writew(u16 w, volatile void __iomem *addr) } #define writel writel +#define writel_relaxed writel static inline void writel(u32 l, volatile void __iomem *addr) { __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */" @@ -175,6 +182,7 @@ static inline void writel(u32 l, volatile void __iomem *addr) } #define writeq writeq +#define writeq_relaxed writeq static inline void writeq(u64 q, volatile void __iomem *addr) { __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */" @@ -183,7 +191,6 @@ static inline void writeq(u64 q, volatile void __iomem *addr) : "memory"); } - #define inb inb static inline u8 inb(unsigned long addr) { @@ -264,11 +271,6 @@ static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned l outsl((unsigned long __force)port, buf, count); } -#define readb_relaxed(__addr) readb(__addr) -#define readw_relaxed(__addr) readw(__addr) -#define readl_relaxed(__addr) readl(__addr) -#define readq_relaxed(__addr) readq(__addr) - /* Valid I/O Space regions are anywhere, because each PCI bus supported * can live in an arbitrary area of the physical address range. 
*/ diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 9fe434969fab..d372641054d9 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -241,6 +241,10 @@ static inline void writeq(u64 val, unsigned long addr) #define readw_relaxed readw #define readl_relaxed readl #define readq_relaxed readq +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq #define ioread8 readb #define ioread16 readw diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index b8237d8a1e0c..0cdbe6e81b45 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -74,6 +74,9 @@ build_mmio_write(__writel, "l", unsigned int, "r", ) #define __raw_readw __readw #define __raw_readl __readl +#define writeb_relaxed(v, a) __writeb(v, a) +#define writew_relaxed(v, a) __writew(v, a) +#define writel_relaxed(v, a) __writel(v, a) #define __raw_writeb __writeb #define __raw_writew __writew #define __raw_writel __writel @@ -86,6 +89,7 @@ build_mmio_read(readq, "q", unsigned long, "=r", :"memory") build_mmio_write(writeq, "q", unsigned long, "r", :"memory") #define readq_relaxed(a) readq(a) +#define writeq_relaxed(v, a) writeq(v, a) #define __raw_readq(a) readq(a) #define __raw_writeq(val, addr) writeq(val, addr) @@ -310,8 +314,8 @@ BUILDIO(b, b, char) BUILDIO(w, w, short) BUILDIO(l, , int) -extern void *xlate_dev_mem_ptr(unsigned long phys); -extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); +extern void *xlate_dev_mem_ptr(phys_addr_t phys); +extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, unsigned long prot_val); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index af78e50ca6ce..b12f43c192cf 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -327,7 +327,7 @@ EXPORT_SYMBOL(iounmap); * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access */ -void *xlate_dev_mem_ptr(unsigned long phys) +void *xlate_dev_mem_ptr(phys_addr_t phys) { void *addr; unsigned long start = phys & PAGE_MASK; @@ -343,7 +343,7 @@ void *xlate_dev_mem_ptr(unsigned long phys) return addr; } -void unxlate_dev_mem_ptr(unsigned long phys, void *addr) +void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { if (page_is_ram(phys >> PAGE_SHIFT)) return; diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 74944207167e..fe1600a09438 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -74,13 +74,6 @@ static inline void iounmap(volatile void __iomem *addr) #endif /* CONFIG_MMU */ -/* - * Generic I/O - */ -#define readb_relaxed readb -#define readw_relaxed readw -#define readl_relaxed readl - #endif /* __KERNEL__ */ #include <asm-generic/io.h> diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 524b707894ef..4c58333b4257 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -84,9 +84,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) } #endif -void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr) +#ifndef unxlate_dev_mem_ptr +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) { } +#endif /* * This funcion reads the *physical* memory. 
The f_pos points directly to the @@ -97,7 +100,7 @@ static ssize_t read_mem(struct file *file, char __user *buf, { phys_addr_t p = *ppos; ssize_t read, sz; - char *ptr; + void *ptr; if (p != *ppos) return 0; @@ -400,7 +403,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf, * uncached, then it must also be accessed uncached * by the kernel or data corruption may occur */ - kbuf = xlate_dev_kmem_ptr((char *)p); + kbuf = xlate_dev_kmem_ptr((void *)p); if (copy_to_user(buf, kbuf, sz)) return -EFAULT; @@ -461,7 +464,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, #endif while (count > 0) { - char *ptr; + void *ptr; sz = size_inside_page(p, count); @@ -470,7 +473,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf, * it must also be accessed uncached by the kernel or data * corruption may occur. */ - ptr = xlate_dev_kmem_ptr((char *)p); + ptr = xlate_dev_kmem_ptr((void *)p); copied = copy_from_user(ptr, buf, sz); if (copied) { diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index b8fdc57a7335..9db042304df3 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -12,6 +12,7 @@ #define __ASM_GENERIC_IO_H #include <asm/page.h> /* I/O is all done through memory accesses */ +#include <linux/string.h> /* for memset() and memcpy() */ #include <linux/types.h> #ifdef CONFIG_GENERIC_IOMAP @@ -24,260 +25,691 @@ #define mmiowb() do {} while (0) #endif -/*****************************************************************************/ /* - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the simple architectures, we just read/write the - * memory location directly. + * __raw_{read,write}{b,w,l,q}() access memory in native endianness. + * + * On some architectures memory mapped IO needs to be accessed differently. + * On the simple architectures, we just read/write the memory location + * directly. 
*/ + #ifndef __raw_readb +#define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { - return *(const volatile u8 __force *) addr; + return *(const volatile u8 __force *)addr; } #endif #ifndef __raw_readw +#define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { - return *(const volatile u16 __force *) addr; + return *(const volatile u16 __force *)addr; } #endif #ifndef __raw_readl +#define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { - return *(const volatile u32 __force *) addr; + return *(const volatile u32 __force *)addr; } #endif -#define readb __raw_readb - -#define readw readw -static inline u16 readw(const volatile void __iomem *addr) -{ - return __le16_to_cpu(__raw_readw(addr)); -} - -#define readl readl -static inline u32 readl(const volatile void __iomem *addr) +#ifdef CONFIG_64BIT +#ifndef __raw_readq +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) { - return __le32_to_cpu(__raw_readl(addr)); + return *(const volatile u64 __force *)addr; } +#endif +#endif /* CONFIG_64BIT */ #ifndef __raw_writeb -static inline void __raw_writeb(u8 b, volatile void __iomem *addr) +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 value, volatile void __iomem *addr) { - *(volatile u8 __force *) addr = b; + *(volatile u8 __force *)addr = value; } #endif #ifndef __raw_writew -static inline void __raw_writew(u16 b, volatile void __iomem *addr) +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 value, volatile void __iomem *addr) { - *(volatile u16 __force *) addr = b; + *(volatile u16 __force *)addr = value; } #endif #ifndef __raw_writel -static inline void __raw_writel(u32 b, volatile void __iomem *addr) +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 value, volatile void __iomem *addr) { - *(volatile u32 __force *) addr = b; + *(volatile u32 __force *)addr = value; } #endif -#define writeb __raw_writeb -#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) -#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) - #ifdef CONFIG_64BIT -#ifndef __raw_readq -static inline u64 __raw_readq(const volatile void __iomem *addr) +#ifndef __raw_writeq +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 value, volatile void __iomem *addr) { - return *(const volatile u64 __force *) addr; + *(volatile u64 __force *)addr = value; } #endif +#endif /* CONFIG_64BIT */ -#define readq readq -static inline u64 readq(const volatile void __iomem *addr) -{ - return __le64_to_cpu(__raw_readq(addr)); -} +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. 
+ */ -#ifndef __raw_writeq -static inline void __raw_writeq(u64 b, volatile void __iomem *addr) +#ifndef readb +#define readb readb +static inline u8 readb(const volatile void __iomem *addr) { - *(volatile u64 __force *) addr = b; + return __raw_readb(addr); } #endif -#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr) -#endif /* CONFIG_64BIT */ - -#ifndef PCI_IOBASE -#define PCI_IOBASE ((void __iomem *) 0) +#ifndef readw +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} #endif -/*****************************************************************************/ -/* - * traditional input/output functions - */ - -static inline u8 inb(unsigned long addr) +#ifndef readl +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) { - return readb(addr + PCI_IOBASE); + return __le32_to_cpu(__raw_readl(addr)); } +#endif -static inline u16 inw(unsigned long addr) +#ifdef CONFIG_64BIT +#ifndef readq +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) { - return readw(addr + PCI_IOBASE); + return __le64_to_cpu(__raw_readq(addr)); } +#endif +#endif /* CONFIG_64BIT */ -static inline u32 inl(unsigned long addr) +#ifndef writeb +#define writeb writeb +static inline void writeb(u8 value, volatile void __iomem *addr) { - return readl(addr + PCI_IOBASE); + __raw_writeb(value, addr); } +#endif -static inline void outb(u8 b, unsigned long addr) +#ifndef writew +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) { - writeb(b, addr + PCI_IOBASE); + __raw_writew(cpu_to_le16(value), addr); } +#endif -static inline void outw(u16 b, unsigned long addr) +#ifndef writel +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) { - writew(b, addr + PCI_IOBASE); + __raw_writel(__cpu_to_le32(value), addr); } +#endif -static inline void outl(u32 b, unsigned long addr) +#ifdef CONFIG_64BIT +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) { - writel(b, addr + PCI_IOBASE); + __raw_writeq(__cpu_to_le64(value), addr); } +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. + */ +#ifndef readb_relaxed +#define readb_relaxed readb +#endif -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x, addr) outb((x), (addr)) -#define outw_p(x, addr) outw((x), (addr)) -#define outl_p(x, addr) outl((x), (addr)) +#ifndef readw_relaxed +#define readw_relaxed readw +#endif -#ifndef insb -static inline void insb(unsigned long addr, void *buffer, int count) +#ifndef readl_relaxed +#define readl_relaxed readl +#endif + +#ifndef readq_relaxed +#define readq_relaxed readq +#endif + +#ifndef writeb_relaxed +#define writeb_relaxed writeb +#endif + +#ifndef writew_relaxed +#define writew_relaxed writew +#endif + +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed writeq +#endif + +/* + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). 
+ */ +#ifndef readsb +#define readsb readsb +static inline void readsb(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u8 *buf = buffer; + do { - u8 x = __raw_readb(addr + PCI_IOBASE); + u8 x = __raw_readb(addr); *buf++ = x; } while (--count); } } #endif -#ifndef insw -static inline void insw(unsigned long addr, void *buffer, int count) +#ifndef readsw +#define readsw readsw +static inline void readsw(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u16 *buf = buffer; + do { - u16 x = __raw_readw(addr + PCI_IOBASE); + u16 x = __raw_readw(addr); *buf++ = x; } while (--count); } } #endif -#ifndef insl -static inline void insl(unsigned long addr, void *buffer, int count) +#ifndef readsl +#define readsl readsl +static inline void readsl(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u32 *buf = buffer; + do { - u32 x = __raw_readl(addr + PCI_IOBASE); + u32 x = __raw_readl(addr); *buf++ = x; } while (--count); } } #endif -#ifndef outsb -static inline void outsb(unsigned long addr, const void *buffer, int count) +#ifdef CONFIG_64BIT +#ifndef readsq +#define readsq readsq +static inline void readsq(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __raw_readq(addr); + *buf++ = x; + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writesb +#define writesb writesb +static inline void writesb(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u8 *buf = buffer; + do { - __raw_writeb(*buf++, addr + PCI_IOBASE); + __raw_writeb(*buf++, addr); } while (--count); } } #endif -#ifndef outsw -static inline void outsw(unsigned long addr, const void *buffer, int count) +#ifndef writesw +#define writesw writesw +static inline void writesw(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u16 *buf = buffer; + do { - __raw_writew(*buf++, addr + PCI_IOBASE); + __raw_writew(*buf++, addr); } while (--count); } } #endif -#ifndef outsl -static inline void outsl(unsigned long addr, const void *buffer, int count) +#ifndef writesl +#define writesl writesl +static inline void writesl(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u32 *buf = buffer; + do { - __raw_writel(*buf++, addr + PCI_IOBASE); + __raw_writel(*buf++, addr); } while (--count); } } #endif -#ifndef CONFIG_GENERIC_IOMAP -#define ioread8(addr) readb(addr) -#define ioread16(addr) readw(addr) -#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) -#define ioread32(addr) readl(addr) -#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) - -#define iowrite8(v, addr) writeb((v), (addr)) -#define iowrite16(v, addr) writew((v), (addr)) -#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) -#define iowrite32(v, addr) writel((v), (addr)) -#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) - -#define ioread8_rep(p, dst, count) \ - insb((unsigned long) (p), (dst), (count)) -#define ioread16_rep(p, dst, count) \ - insw((unsigned long) (p), (dst), (count)) -#define ioread32_rep(p, dst, count) \ - insl((unsigned long) (p), (dst), (count)) - -#define iowrite8_rep(p, src, count) \ - outsb((unsigned long) (p), (src), (count)) -#define iowrite16_rep(p, src, count) \ - outsw((unsigned long) (p), (src), (count)) -#define iowrite32_rep(p, src, count) \ - outsl((unsigned long) (p), (src), (count)) -#endif 
/* CONFIG_GENERIC_IOMAP */ +#ifdef CONFIG_64BIT +#ifndef writesq +#define writesq writesq +static inline void writesq(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __raw_writeq(*buf++, addr); + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef PCI_IOBASE +#define PCI_IOBASE ((void __iomem *)0) +#endif #ifndef IO_SPACE_LIMIT #define IO_SPACE_LIMIT 0xffff #endif +/* + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be + * implemented on hardware that needs an additional delay for I/O accesses to + * take effect. + */ + +#ifndef inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + return readb(PCI_IOBASE + addr); +} +#endif + +#ifndef inw +#define inw inw +static inline u16 inw(unsigned long addr) +{ + return readw(PCI_IOBASE + addr); +} +#endif + +#ifndef inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + return readl(PCI_IOBASE + addr); +} +#endif + +#ifndef outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + writeb(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + writew(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + writel(value, PCI_IOBASE + addr); +} +#endif + +#ifndef inb_p +#define inb_p inb_p +static inline u8 inb_p(unsigned long addr) +{ + return inb(addr); +} +#endif + +#ifndef inw_p +#define inw_p inw_p +static inline u16 inw_p(unsigned long addr) +{ + return inw(addr); +} +#endif + +#ifndef inl_p +#define inl_p inl_p +static inline u32 inl_p(unsigned long addr) +{ + return inl(addr); +} +#endif + +#ifndef outb_p +#define outb_p outb_p +static inline void outb_p(u8 value, unsigned long addr) +{ + outb(value, addr); +} +#endif + +#ifndef outw_p +#define outw_p outw_p +static inline void outw_p(u16 value, unsigned long addr) +{ + outw(value, addr); +} +#endif + +#ifndef outl_p +#define outl_p outl_p +static inline void outl_p(u32 value, unsigned long addr) +{ + outl(value, addr); +} +#endif + +/* + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a + * single I/O port multiple times. 
+/*
+ * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
+ * single I/O port multiple times.
+ */
+
+#ifndef insb
+#define insb insb
+static inline void insb(unsigned long addr, void *buffer, unsigned int count)
+{
+        readsb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insw
+#define insw insw
+static inline void insw(unsigned long addr, void *buffer, unsigned int count)
+{
+        readsw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insl
+#define insl insl
+static inline void insl(unsigned long addr, void *buffer, unsigned int count)
+{
+        readsl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsb
+#define outsb outsb
+static inline void outsb(unsigned long addr, const void *buffer,
+                         unsigned int count)
+{
+        writesb(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsw
+#define outsw outsw
+static inline void outsw(unsigned long addr, const void *buffer,
+                         unsigned int count)
+{
+        writesw(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef outsl
+#define outsl outsl
+static inline void outsl(unsigned long addr, const void *buffer,
+                         unsigned int count)
+{
+        writesl(PCI_IOBASE + addr, buffer, count);
+}
+#endif
+
+#ifndef insb_p
+#define insb_p insb_p
+static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
+{
+        insb(addr, buffer, count);
+}
+#endif
+
+#ifndef insw_p
+#define insw_p insw_p
+static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
+{
+        insw(addr, buffer, count);
+}
+#endif
+
+#ifndef insl_p
+#define insl_p insl_p
+static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
+{
+        insl(addr, buffer, count);
+}
+#endif
+
+#ifndef outsb_p
+#define outsb_p outsb_p
+static inline void outsb_p(unsigned long addr, const void *buffer,
+                           unsigned int count)
+{
+        outsb(addr, buffer, count);
+}
+#endif
+
+#ifndef outsw_p
+#define outsw_p outsw_p
+static inline void outsw_p(unsigned long addr, const void *buffer,
+                           unsigned int count)
+{
+        outsw(addr, buffer, count);
+}
+#endif
+
+#ifndef outsl_p
+#define outsl_p outsl_p
+static inline void outsl_p(unsigned long addr, const void *buffer,
+                           unsigned int count)
+{
+        outsl(addr, buffer, count);
+}
+#endif
+
+#ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioread8
+#define ioread8 ioread8
+static inline u8 ioread8(const volatile void __iomem *addr)
+{
+        return readb(addr);
+}
+#endif
+
+#ifndef ioread16
+#define ioread16 ioread16
+static inline u16 ioread16(const volatile void __iomem *addr)
+{
+        return readw(addr);
+}
+#endif
+
+#ifndef ioread32
+#define ioread32 ioread32
+static inline u32 ioread32(const volatile void __iomem *addr)
+{
+        return readl(addr);
+}
+#endif
+
+#ifndef iowrite8
+#define iowrite8 iowrite8
+static inline void iowrite8(u8 value, volatile void __iomem *addr)
+{
+        writeb(value, addr);
+}
+#endif
+
+#ifndef iowrite16
+#define iowrite16 iowrite16
+static inline void iowrite16(u16 value, volatile void __iomem *addr)
+{
+        writew(value, addr);
+}
+#endif
+
+#ifndef iowrite32
+#define iowrite32 iowrite32
+static inline void iowrite32(u32 value, volatile void __iomem *addr)
+{
+        writel(value, addr);
+}
+#endif
+
+#ifndef ioread16be
+#define ioread16be ioread16be
+static inline u16 ioread16be(const volatile void __iomem *addr)
+{
+        return __be16_to_cpu(__raw_readw(addr));
+}
+#endif
+
+#ifndef ioread32be
+#define ioread32be ioread32be
+static inline u32 ioread32be(const volatile void __iomem *addr)
+{
+        return __be32_to_cpu(__raw_readl(addr));
+}
+#endif
+
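The ioread*()/iowrite*() helpers above work on an __iomem cookie; in this non-GENERIC_IOMAP version they reduce to the MMIO accessors, and a cookie from ioport_map() is simply PCI_IOBASE plus the port. A rough sketch of a read-modify-write on a hypothetical control register (the offset and bit layout are made up):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_REG_CTRL        0x00    /* assumed control register offset */
#define EXAMPLE_CTRL_ENABLE     0x1     /* assumed enable bit */

static void example_enable(void __iomem *base)
{
        u32 ctrl = ioread32(base + EXAMPLE_REG_CTRL);

        iowrite32(ctrl | EXAMPLE_CTRL_ENABLE, base + EXAMPLE_REG_CTRL);
}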
+#ifndef iowrite16be
+#define iowrite16be iowrite16be
+static inline void iowrite16be(u16 value, volatile void __iomem *addr)
+{
+        __raw_writew(__cpu_to_be16(value), addr);
+}
+#endif
+
+#ifndef iowrite32be
+#define iowrite32be iowrite32be
+static inline void iowrite32be(u32 value, volatile void __iomem *addr)
+{
+        __raw_writel(__cpu_to_be32(value), addr);
+}
+#endif
+
+#ifndef ioread8_rep
+#define ioread8_rep ioread8_rep
+static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
+                               unsigned int count)
+{
+        readsb(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread16_rep
+#define ioread16_rep ioread16_rep
+static inline void ioread16_rep(const volatile void __iomem *addr,
+                                void *buffer, unsigned int count)
+{
+        readsw(addr, buffer, count);
+}
+#endif
+
+#ifndef ioread32_rep
+#define ioread32_rep ioread32_rep
+static inline void ioread32_rep(const volatile void __iomem *addr,
+                                void *buffer, unsigned int count)
+{
+        readsl(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite8_rep
+#define iowrite8_rep iowrite8_rep
+static inline void iowrite8_rep(volatile void __iomem *addr,
+                                const void *buffer,
+                                unsigned int count)
+{
+        writesb(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite16_rep
+#define iowrite16_rep iowrite16_rep
+static inline void iowrite16_rep(volatile void __iomem *addr,
+                                 const void *buffer,
+                                 unsigned int count)
+{
+        writesw(addr, buffer, count);
+}
+#endif
+
+#ifndef iowrite32_rep
+#define iowrite32_rep iowrite32_rep
+static inline void iowrite32_rep(volatile void __iomem *addr,
+                                 const void *buffer,
+                                 unsigned int count)
+{
+        writesl(addr, buffer, count);
+}
+#endif
+#endif /* CONFIG_GENERIC_IOMAP */
+
 #ifdef __KERNEL__

 #include <linux/vmalloc.h>
-#define __io_virt(x) ((void __force *) (x))
+#define __io_virt(x) ((void __force *)(x))

 #ifndef CONFIG_GENERIC_IOMAP
 struct pci_dev;
 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
 #ifndef pci_iounmap
+#define pci_iounmap pci_iounmap
 static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
 {
 }
@@ -289,11 +721,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
  * These are pretty trivial
  */
 #ifndef virt_to_phys
+#define virt_to_phys virt_to_phys
 static inline unsigned long virt_to_phys(volatile void *address)
 {
         return __pa((unsigned long)address);
 }
+#endif

+#ifndef phys_to_virt
+#define phys_to_virt phys_to_virt
 static inline void *phys_to_virt(unsigned long address)
 {
         return __va(address);
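The big-endian and _rep variants defined in this hunk are a common fit for FIFOs and ID registers on big-endian peripherals. A minimal sketch, with an invented register layout (32-bit BE ID register at offset 0x0, RX FIFO at 0x20):

#include <linux/io.h>
#include <linux/types.h>

static u32 example_read_chip_id(void __iomem *base)
{
        return ioread32be(base + 0x0);          /* byte-swapped on LE CPUs */
}

static void example_read_rx_words(void __iomem *base, u32 *buf,
                                  unsigned int count)
{
        ioread32_rep(base + 0x20, buf, count);  /* repeated reads of one register */
}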
@@ -306,37 +742,65 @@ static inline void *phys_to_virt(unsigned long address)
  * This implementation is for the no-MMU case only... if you have an MMU
  * you'll need to provide your own definitions.
  */
+
 #ifndef CONFIG_MMU
-static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+#ifndef ioremap
+#define ioremap ioremap
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 {
-        return (void __iomem*) (unsigned long)offset;
+        return (void __iomem *)(unsigned long)offset;
 }
+#endif

-#define __ioremap(offset, size, flags) ioremap(offset, size)
+#ifndef __ioremap
+#define __ioremap __ioremap
+static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
+                                      unsigned long flags)
+{
+        return ioremap(offset, size);
+}
+#endif

 #ifndef ioremap_nocache
-#define ioremap_nocache ioremap
+#define ioremap_nocache ioremap_nocache
+static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
+{
+        return ioremap(offset, size);
+}
 #endif

 #ifndef ioremap_wc
-#define ioremap_wc ioremap_nocache
+#define ioremap_wc ioremap_wc
+static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
+{
+        return ioremap_nocache(offset, size);
+}
 #endif

+#ifndef iounmap
+#define iounmap iounmap
 static inline void iounmap(void __iomem *addr)
 {
 }
+#endif
 #endif /* CONFIG_MMU */

 #ifdef CONFIG_HAS_IOPORT_MAP
 #ifndef CONFIG_GENERIC_IOMAP
+#ifndef ioport_map
+#define ioport_map ioport_map
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
         return PCI_IOBASE + (port & IO_SPACE_LIMIT);
 }
+#endif

+#ifndef ioport_unmap
+#define ioport_unmap ioport_unmap
 static inline void ioport_unmap(void __iomem *p)
 {
 }
+#endif
 #else /* CONFIG_GENERIC_IOMAP */
 extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
 extern void ioport_unmap(void __iomem *p);
@@ -344,35 +808,68 @@ extern void ioport_unmap(void __iomem *p);
 #endif /* CONFIG_HAS_IOPORT_MAP */

 #ifndef xlate_dev_kmem_ptr
-#define xlate_dev_kmem_ptr(p) p
+#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
+static inline void *xlate_dev_kmem_ptr(void *addr)
+{
+        return addr;
+}
 #endif
+
 #ifndef xlate_dev_mem_ptr
-#define xlate_dev_mem_ptr(p) __va(p)
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
+{
+        return __va(addr);
+}
+#endif
+
+#ifndef unxlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+{
+}
 #endif

 #ifdef CONFIG_VIRT_TO_BUS
 #ifndef virt_to_bus
-static inline unsigned long virt_to_bus(volatile void *address)
+static inline unsigned long virt_to_bus(void *address)
 {
-        return ((unsigned long) address);
+        return (unsigned long)address;
 }

 static inline void *bus_to_virt(unsigned long address)
 {
-        return (void *) address;
+        return (void *)address;
 }
 #endif
 #endif

 #ifndef memset_io
-#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#define memset_io memset_io
+static inline void memset_io(volatile void __iomem *addr, int value,
+                             size_t size)
+{
+        memset(__io_virt(addr), value, size);
+}
 #endif

 #ifndef memcpy_fromio
-#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#define memcpy_fromio memcpy_fromio
+static inline void memcpy_fromio(void *buffer,
+                                 const volatile void __iomem *addr,
+                                 size_t size)
+{
+        memcpy(buffer, __io_virt(addr), size);
+}
 #endif
+
 #ifndef memcpy_toio
-#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+#define memcpy_toio memcpy_toio
+static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
+                               size_t size)
+{
+        memcpy(__io_virt(addr), buffer, size);
+}
 #endif

 #endif /* __KERNEL__ */
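Finally, the ioremap()/iounmap() and memcpy_toio()/memset_io() helpers combine in the usual way. A sketch of loading a blob into a hypothetical 4 KiB device window (the physical address, window size and error codes are illustrative only, not taken from this patch):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_WIN_PHYS        0x40000000UL    /* assumed device window address */
#define EXAMPLE_WIN_SIZE        0x1000          /* assumed window size */

static int example_load_blob(const void *blob, size_t size)
{
        void __iomem *win;

        if (size > EXAMPLE_WIN_SIZE)
                return -EINVAL;

        win = ioremap(EXAMPLE_WIN_PHYS, EXAMPLE_WIN_SIZE);
        if (!win)
                return -ENOMEM;

        memcpy_toio(win, blob, size);                           /* copy into device memory */
        memset_io(win + size, 0, EXAMPLE_WIN_SIZE - size);      /* clear the remainder */
        iounmap(win);

        return 0;
}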