author     Christophe Leroy <christophe.leroy@c-s.fr>  2019-08-20 14:07:19 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>       2019-08-27 13:03:35 +1000
commit     4a45b7460cf458012a6930f675e141256b81dcf4
tree       20eb37cd0bb73b81a000e32e811b9f4ea39c05ff
parent     191e42063a7241e5c3a1d1f36896a20b147517e9
powerpc/mm: refactor ioremap vm area setup.
PPC32 and PPC64 do the same thing once SLAB is available.
Create a do_ioremap() function that calls get_vm_area and
does the mapping.
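
For reference, the helper added to arch/powerpc/mm/ioremap.c (shown in
context in the diff below) looks like this; the explanatory comments are
editorial, not part of the patch:

void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;

	/* Reserve a virtual range in the platform's ioremap window */
	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	/* Record the physical address and install the page tables */
	area->phys_addr = pa;
	ret = ioremap_range((unsigned long)area->addr, pa, size, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	/* Mapping failed: release the reserved virtual range */
	free_vm_area(area);

	return NULL;
}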
For PPC64, we add the 4K PFN hack sanity check to __ioremap_caller()
in order to avoid using __ioremap_at(). Other checks in __ioremap_at()
are irrelevant for __ioremap_caller().
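
The check itself is the two-line test added at the top of the PPC64
__ioremap_caller() (see the ioremap_64.c hunk below):

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;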
On PPC64, the VM area is allocated in the range [ioremap_bot ; IOREMAP_END].
On PPC32, the VM area is allocated in the range [VMALLOC_START ; VMALLOC_END].
Let's define IOREMAP_START as ioremap_bot for PPC64, and alias
IOREMAP_START/END to VMALLOC_START/END on PPC32.
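
Concretely, both PPC32 variants (book3s and nohash) gain:

	/* PPC32 shares vmalloc area with ioremap */
	#define IOREMAP_START	VMALLOC_START
	#define IOREMAP_END	VMALLOC_END

while the PPC64 variants define IOREMAP_START as (ioremap_bot) alongside
their existing IOREMAP_END.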
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/42e7e36ad32e0fdf76692426cc642799c9f689b8.1566309263.git.christophe.leroy@c-s.fr
 arch/powerpc/include/asm/book3s/32/pgtable.h |  4
 arch/powerpc/include/asm/book3s/64/pgtable.h |  1
 arch/powerpc/include/asm/io.h                |  2
 arch/powerpc/include/asm/nohash/32/pgtable.h |  4
 arch/powerpc/include/asm/nohash/64/pgtable.h |  1
 arch/powerpc/mm/ioremap.c                    | 20
 arch/powerpc/mm/ioremap_32.c                 | 15
 arch/powerpc/mm/ioremap_64.c                 | 17
 8 files changed, 43 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index aa1bc5f8da90..331a29a501a1 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -165,6 +165,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #define IOREMAP_TOP	KVIRT_TOP
 #endif
 
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START	VMALLOC_START
+#define IOREMAP_END	VMALLOC_END
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 16MB value just means that there will be a 64MB "hole" after the
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 03c9a14dd902..b01624e5c467 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -316,6 +316,7 @@ extern unsigned long pci_io_base;
 #define PHB_IO_BASE	(ISA_IO_END)
 #define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_START	(ioremap_bot)
 #define IOREMAP_END	(KERN_IO_END)
 
 /* Advertise special mapping type for AGP */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 8e00d95f9600..dc529ea0fffa 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -723,6 +723,8 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 
 int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot);
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+			 pgprot_t prot, void *caller);
 
 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
 				      pgprot_t prot, void *caller);
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 7ce2a7c9fade..3e1a4c1e40f0 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -93,6 +93,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
 #define IOREMAP_TOP	KVIRT_TOP
 #endif
 
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START	VMALLOC_START
+#define IOREMAP_END	VMALLOC_END
+
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 16MB value just means that there will be a 64MB "hole" after the
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index b9f66cf15c31..9a33b8bd842d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -53,6 +53,7 @@
 #define PHB_IO_BASE	(ISA_IO_END)
 #define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
+#define IOREMAP_START	(ioremap_bot)
 #define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
 
 
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 50ee6544d0b7..57630325846c 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -80,3 +80,23 @@ int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t
 
 	return 0;
 }
+
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+			 pgprot_t prot, void *caller)
+{
+	struct vm_struct *area;
+	int ret;
+
+	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
+	if (area == NULL)
+		return NULL;
+
+	area->phys_addr = pa;
+	ret = ioremap_range((unsigned long)area->addr, pa, size, prot);
+	if (!ret)
+		return (void __iomem *)area->addr + offset;
+
+	free_vm_area(area);
+
+	return NULL;
+}
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 85b90a62e084..fcf343dbf2bf 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -18,7 +18,7 @@ void __iomem *
 __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
 {
 	unsigned long v;
-	phys_addr_t p;
+	phys_addr_t p, offset;
 	int err;
 
 	/*
@@ -28,6 +28,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	 * (ioremap_bot records where we're up to).
 	 */
 	p = addr & PAGE_MASK;
+	offset = addr & ~PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - p;
 
 	/*
@@ -62,12 +63,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 		goto out;
 
 	if (slab_is_available()) {
-		struct vm_struct *area;
-		area = get_vm_area_caller(size, VM_IOREMAP, caller);
-		if (area == 0)
-			return NULL;
-		area->phys_addr = p;
-		v = (unsigned long)area->addr;
+		return do_ioremap(p, offset, size, prot, caller);
 	} else {
 		v = (ioremap_bot -= size);
 	}
@@ -77,11 +73,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	 */
 	err = ioremap_range((unsigned long)v, p, size, prot);
-	if (err) {
-		if (slab_is_available())
-			vunmap((void *)v);
+	if (err)
 		return NULL;
-	}
 
 out:
 	return (void __iomem *)(v + ((unsigned long)addr & ~PAGE_MASK));
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index d132ce1e538d..e37b68b7f0e8 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -46,9 +46,13 @@ EXPORT_SYMBOL(__iounmap_at);
 void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 			       pgprot_t prot, void *caller)
 {
-	phys_addr_t paligned;
+	phys_addr_t paligned, offset;
 	void __iomem *ret;
 
+	/* We don't support the 4K PFN hack with ioremap */
+	if (pgprot_val(prot) & H_PAGE_4K_PFN)
+		return NULL;
+
 	/*
 	 * Choose an address to map it to. Once the vmalloc system is running,
 	 * we use it. Before that, we map using addresses going up from
@@ -56,21 +60,14 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 	 * through ioremap_bot.
 	 */
 	paligned = addr & PAGE_MASK;
+	offset = addr & ~PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - paligned;
 
 	if (size == 0 || paligned == 0)
 		return NULL;
 
 	if (slab_is_available()) {
-		struct vm_struct *area;
-
-		area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot,
-					    IOREMAP_END, caller);
-		if (area == NULL)
-			return NULL;
-
-		area->phys_addr = paligned;
-		ret = __ioremap_at(paligned, area->addr, size, prot);
+		return do_ioremap(paligned, offset, size, prot, caller);
 	} else {
 		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
 		if (ret)
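
To illustrate the refactored path end to end: once the SLAB allocator is
up, an ioremap() call reaches __ioremap_caller(), which page-aligns the
address, computes the sub-page offset, and delegates to do_ioremap(). A
minimal sketch, assuming a hypothetical device register block at
EXAMPLE_PHYS (the address, size, and function name are illustrative and
not part of this patch):

#include <linux/io.h>

/* Hypothetical MMIO address, deliberately not page-aligned */
#define EXAMPLE_PHYS	0xf0000004UL
#define EXAMPLE_SIZE	0x100UL

static void __iomem *example_map(void)
{
	/*
	 * __ioremap_caller() maps the containing pages via do_ioremap()
	 * and re-applies the 0x4 sub-page offset to the returned pointer.
	 */
	void __iomem *regs = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);

	if (!regs)
		return NULL;	/* VM area allocation or mapping failed */

	return regs;
}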