Diffstat (limited to 'arch/x86/mm/ioremap.c')
-rw-r--r--	arch/x86/mm/ioremap.c	| 191
1 file changed, 150 insertions(+), 41 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 794895c6dcc9..804de18abcc2 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -19,11 +19,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
-
-enum ioremap_mode {
-	IOR_MODE_UNCACHED,
-	IOR_MODE_CACHED,
-};
+#include <asm/pat.h>
 
 #ifdef CONFIG_X86_64
 
@@ -35,11 +31,23 @@ unsigned long __phys_addr(unsigned long x)
 }
 EXPORT_SYMBOL(__phys_addr);
 
+static inline int phys_addr_valid(unsigned long addr)
+{
+	return addr < (1UL << boot_cpu_data.x86_phys_bits);
+}
+
+#else
+
+static inline int phys_addr_valid(unsigned long addr)
+{
+	return 1;
+}
+
 #endif
 
 int page_is_ram(unsigned long pagenr)
 {
-	unsigned long addr, end;
+	resource_size_t addr, end;
 	int i;
 
 	/*
@@ -78,19 +86,22 @@ int page_is_ram(unsigned long pagenr)
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			       enum ioremap_mode mode)
+int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+			       unsigned long prot_val)
 {
 	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err;
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
 	default:
-		err = set_memory_uc(vaddr, nrpages);
+		err = _set_memory_uc(vaddr, nrpages);
+		break;
+	case _PAGE_CACHE_WC:
+		err = _set_memory_wc(vaddr, nrpages);
 		break;
-	case IOR_MODE_CACHED:
-		err = set_memory_wb(vaddr, nrpages);
+	case _PAGE_CACHE_WB:
+		err = _set_memory_wb(vaddr, nrpages);
 		break;
 	}
 
@@ -106,18 +117,28 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-			       enum ioremap_mode mode)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+		unsigned long size, unsigned long prot_val, void *caller)
 {
-	unsigned long pfn, offset, last_addr, vaddr;
+	unsigned long pfn, offset, vaddr;
+	resource_size_t last_addr;
 	struct vm_struct *area;
+	unsigned long new_prot_val;
 	pgprot_t prot;
+	int retval;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
+	if (!phys_addr_valid(phys_addr)) {
+		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
+			(unsigned long long)phys_addr);
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
 	/*
 	 * Don't remap the low PCI/ISA area, it's always mapped..
 	 */
@@ -127,25 +148,14 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
-	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
-		if (page_is_ram(pfn) && pfn_valid(pfn) &&
-		    !PageReserved(pfn_to_page(pfn)))
-			return NULL;
-	}
+	for (pfn = phys_addr >> PAGE_SHIFT;
+				(pfn << PAGE_SHIFT) < last_addr; pfn++) {
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
-	default:
-		/*
-		 * FIXME: we will use UC MINUS for now, as video fb drivers
-		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
-		 */
-		prot = PAGE_KERNEL_UC_MINUS;
-		break;
-	case IOR_MODE_CACHED:
-		prot = PAGE_KERNEL;
-		break;
+		int is_ram = page_is_ram(pfn);
+
+		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
+			return NULL;
+		WARN_ON_ONCE(is_ram);
 	}
 
@@ -155,20 +165,66 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
+	retval = reserve_memtype(phys_addr, phys_addr + size,
+						prot_val, &new_prot_val);
+	if (retval) {
+		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+		return NULL;
+	}
+
+	if (prot_val != new_prot_val) {
+		/*
+		 * Do not fallback to certain memory types with certain
+		 * requested type:
+		 * - request is uncached, return cannot be write-back
+		 * - request is uncached, return cannot be write-combine
+		 * - request is write-combine, return cannot be write-back
+		 */
+		if ((prot_val == _PAGE_CACHE_UC &&
+		     (new_prot_val == _PAGE_CACHE_WB ||
+		      new_prot_val == _PAGE_CACHE_WC)) ||
+		    (prot_val == _PAGE_CACHE_WC &&
+		     new_prot_val == _PAGE_CACHE_WB)) {
+			pr_debug(
+		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+				(unsigned long long)phys_addr,
+				(unsigned long long)(phys_addr + size),
+				prot_val, new_prot_val);
+			free_memtype(phys_addr, phys_addr + size);
+			return NULL;
+		}
+		prot_val = new_prot_val;
+	}
+
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
+	default:
+		prot = PAGE_KERNEL_NOCACHE;
+		break;
+	case _PAGE_CACHE_WC:
+		prot = PAGE_KERNEL_WC;
+		break;
+	case _PAGE_CACHE_WB:
+		prot = PAGE_KERNEL;
+		break;
+	}
+
 	/*
 	 * Ok, go for it..
 	 */
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
 	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, mode) < 0) {
+	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+		free_memtype(phys_addr, phys_addr + size);
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -199,13 +255,35 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * ioremap_wc	-	map memory into CPU space write combined
+ * @offset:	bus address of the memory
+ * @size:	size of the resource to map
+ *
+ * This version of ioremap ensures that the memory is marked write combining.
+ * Write combining allows faster writes to some hardware devices.
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+{
+	if (pat_wc_enabled)
+		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+					__builtin_return_address(0));
+	else
+		return ioremap_nocache(phys_addr, size);
+}
+EXPORT_SYMBOL(ioremap_wc);
+
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
+	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
 
@@ -252,6 +330,8 @@ void iounmap(volatile void __iomem *addr)
 		return;
 	}
 
+	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
+
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
 	BUG_ON(p != o || o == NULL);
@@ -259,6 +339,35 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+void *xlate_dev_mem_ptr(unsigned long phys)
+{
+	void *addr;
+	unsigned long start = phys & PAGE_MASK;
+
+	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+	if (page_is_ram(start >> PAGE_SHIFT))
+		return __va(phys);
+
+	addr = (void *)ioremap(start, PAGE_SIZE);
+	if (addr)
+		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+
+	return addr;
+}
+
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+	if (page_is_ram(phys >> PAGE_SHIFT))
+		return;
+
+	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+	return;
+}
+
 #ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;
@@ -272,8 +381,8 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-		__attribute__((aligned(PAGE_SIZE)));
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
+		__section(.bss.page_aligned);
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -330,7 +439,7 @@ void __init early_ioremap_clear(void)
 
 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
 	pmd_clear(pmd);
-	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
+	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
 	__flush_tlb_all();
 }
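The fallback policy that __ioremap_caller() enforces after reserve_memtype() can be read as a small predicate: a mapping may silently become stricter than requested, but never weaker. The sketch below restates that rule for readability only; it is not part of the patch, and memtype_fallback_ok() is a hypothetical name.

#include <asm/pgtable.h>	/* for the _PAGE_CACHE_* values */

static int memtype_fallback_ok(unsigned long req, unsigned long got)
{
	if (req == _PAGE_CACHE_UC &&
	    (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC))
		return 0;	/* uncached must not weaken to WB or WC */
	if (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB)
		return 0;	/* write-combine must not weaken to write-back */
	return 1;		/* any other returned type is adopted as-is */
}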
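A driver would use the new ioremap_wc() interface roughly as in the sketch below. This is illustrative only: the example_* helpers and the fb_phys/fb_len parameters are hypothetical, and the fallback to an uncached mapping when PAT is disabled happens inside ioremap_wc() itself.

#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *fb_base;	/* hypothetical framebuffer aperture */

static int example_map_fb(unsigned long fb_phys, unsigned long fb_len)
{
	/* Returns a write-combined mapping with PAT, uncached without. */
	fb_base = ioremap_wc(fb_phys, fb_len);
	if (!fb_base)
		return -ENOMEM;
	return 0;
}

static void example_unmap_fb(void)
{
	/* iounmap() now also drops the memtype reservation. */
	iounmap(fb_base);
}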
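The xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() pair added above serves the /dev/mem access paths. A minimal sketch of the expected calling pattern, assuming a kernel destination buffer and an access that stays within one page; example_read_phys() is a hypothetical helper, not part of the patch (the real user copies to user space and loops over pages).

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t example_read_phys(unsigned long phys, void *buf, size_t count)
{
	void *ptr = xlate_dev_mem_ptr(phys);	/* __va() for RAM, else ioremap() */

	if (!ptr)
		return -EFAULT;

	memcpy(buf, ptr, count);
	unxlate_dev_mem_ptr(phys, ptr);		/* iounmap() unless the page was RAM */
	return count;
}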