Diffstat (limited to 'arch/openrisc/mm')
-rw-r--r--  arch/openrisc/mm/cache.c   | 56
-rw-r--r--  arch/openrisc/mm/init.c    | 48
-rw-r--r--  arch/openrisc/mm/ioremap.c |  5
3 files changed, 88 insertions(+), 21 deletions(-)
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index eb43b73f3855..0f265b8e73ec 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -14,31 +14,70 @@
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>
-static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+/*
+ * Check if the cache component exists.
+ */
+bool cpu_cache_is_present(const unsigned int cache_type)
{
- unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
- unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+ unsigned long upr = mfspr(SPR_UPR);
+ unsigned long mask = SPR_UPR_UP | cache_type;
+
+ return !((upr & mask) ^ mask);
+}
+
+static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
+ const unsigned short reg, const unsigned int cache_type)
+{
+ if (!cpu_cache_is_present(cache_type))
+ return;
- while (line < paddr + PAGE_SIZE) {
- mtspr(reg, line);
- line += L1_CACHE_BYTES;
+ while (paddr < end) {
+ mtspr(reg, paddr);
+ paddr += L1_CACHE_BYTES;
}
}
+static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
+ const unsigned int cache_type)
+{
+ unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long end = paddr + PAGE_SIZE;
+
+ paddr &= ~(L1_CACHE_BYTES - 1);
+
+ cache_loop(paddr, end, reg, cache_type);
+}
+
void local_dcache_page_flush(struct page *page)
{
- cache_loop(page, SPR_DCBFR);
+ cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
}
EXPORT_SYMBOL(local_dcache_page_flush);
void local_icache_page_inv(struct page *page)
{
- cache_loop(page, SPR_ICBIR);
+ cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
}
EXPORT_SYMBOL(local_icache_page_inv);
+void local_dcache_range_flush(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
+}
+
+void local_dcache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
+}
+
+void local_icache_range_inv(unsigned long start, unsigned long end)
+{
+ cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
+}
+
void update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
@@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
sync_icache_dcache(folio_page(folio, nr));
}
}
-
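Note (editorial, not part of the patch): cpu_cache_is_present() tests two UPR bits at once; !((upr & mask) ^ mask) is equivalent to (upr & mask) == mask, i.e. both SPR_UPR_UP and the requested cache-present bit must be set before any cache-control SPR is written. A minimal sketch of how a caller might drive the new range helpers for a DMA-style sync; buf and len are hypothetical, and virt_to_phys() is the generic kernel helper rather than anything added here:

    unsigned long pa = virt_to_phys(buf);

    /* Before a device reads the buffer: write back dirty lines. */
    local_dcache_range_flush(pa, pa + len);

    /* After a device has written the buffer: discard stale lines. */
    local_dcache_range_inv(pa, pa + len);

cache_loop() steps through the range in L1_CACHE_BYTES strides; note that the range variants do not round the start address down to a line boundary the way cache_loop_page() does.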
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 1dcd78c8f0e9..e4904ca6f0a0 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
int mem_init_done;
@@ -176,8 +177,8 @@ void __init paging_init(void)
barrier();
/* Invalidate instruction caches after code modification */
- mtspr(SPR_ICBIR, 0x900);
- mtspr(SPR_ICBIR, 0xa00);
+ local_icache_block_inv(0x900);
+ local_icache_block_inv(0xa00);
/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by
@@ -193,20 +194,51 @@ void __init mem_init(void)
{
BUG_ON(!mem_map);
- max_mapnr = max_low_pfn;
- high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
-
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
- /* this will put all low memory onto the freelists */
- memblock_free_all();
-
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
}
+static int __init map_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ p4d = p4d_offset(pgd_offset_k(va), va);
+ pud = pud_offset(p4d, va);
+ pmd = pmd_offset(pud, va);
+ pte = pte_alloc_kernel(pmd, va);
+
+ if (pte == NULL)
+ return -ENOMEM;
+
+ if (pgprot_val(prot))
+ set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
+ else
+ pte_clear(&init_mm, va, pte);
+
+ local_flush_tlb_page(NULL, va);
+ return 0;
+}
+
+void __init __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ map_page(address, phys, prot);
+}
+
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY_X,
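Note (editorial sketch): map_page() walks the kernel page tables top down (pgd, p4d, pud, pmd), allocates the PTE page on demand via pte_alloc_kernel(), installs or clears the entry depending on whether prot carries any bits, and flushes the single affected TLB entry. __set_fixmap() layers on top of it to back a compile-time-fixed virtual slot with an arbitrary physical page. Roughly how a fixmap slot would be consumed, assuming a hypothetical FIX_EARLYCON_MEM index and uart_phys address, neither of which this diff defines (a real device mapping would also want a cache-inhibited pgprot rather than plain PAGE_KERNEL):

    /* Map the page containing the UART registers, then add back the
     * sub-page offset to get a usable virtual address.
     */
    __set_fixmap(FIX_EARLYCON_MEM, uart_phys & PAGE_MASK, PAGE_KERNEL);

    void __iomem *uart = (void __iomem *)
            (__fix_to_virt(FIX_EARLYCON_MEM) + (uart_phys & ~PAGE_MASK));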
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index f59ea4c10b0f..8e63e86251ca 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -38,10 +38,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
if (likely(mem_init_done)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
} else {
- pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE);
+ pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
return pte;
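Note (editorial sketch): the ioremap.c hunk is a pure simplification; memblock_alloc_or_panic() folds the open-coded NULL check and panic() into the allocator call itself. Its behaviour is approximately the following, shown only to illustrate the pattern being replaced, not as the exact kernel implementation:

    /* Approximate expansion of memblock_alloc_or_panic(). */
    static inline void *alloc_or_panic_sketch(phys_addr_t size, phys_addr_t align)
    {
            void *p = memblock_alloc(size, align);

            if (!p)
                    panic("%s: Failed to allocate %pa bytes align=%pa\n",
                          __func__, &size, &align);
            return p;
    }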