author    Linus Torvalds <torvalds@linux-foundation.org>    2021-06-29 17:29:11 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-06-29 17:29:11 -0700
commit    65090f30ab791810a3dc840317e57df05018559c (patch)
tree      f417526656da37109777e89613e140ffc59228bc /arch/m68k
parent    349a2d52ffe59b7a0c5876fa7ee9f3eaf188b830 (diff)
parent    0ed950d1f28142ccd9a9453c60df87853530d778 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "191 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts,
  ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
  slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
  mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
  pagealloc, and memory-failure)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
  mm,hwpoison: make get_hwpoison_page() call get_any_page()
  mm,hwpoison: send SIGBUS with error virutal address
  mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
  mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
  mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
  mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
  docs: remove description of DISCONTIGMEM
  arch, mm: remove stale mentions of DISCONIGMEM
  mm: remove CONFIG_DISCONTIGMEM
  m68k: remove support for DISCONTIGMEM
  arc: remove support for DISCONTIGMEM
  arc: update comment about HIGHMEM implementation
  alpha: remove DISCONTIGMEM and NUMA
  mm/page_alloc: move free_the_page
  mm/page_alloc: fix counting of managed_pages
  mm/page_alloc: improve memmap_pages dbg msg
  mm: drop SECTION_SHIFT in code comments
  mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
  mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
  mm/page_alloc: scale the number of pages that are batch freed
  ...
Diffstat (limited to 'arch/m68k')
-rw-r--r--  arch/m68k/Kconfig.cpu            | 10
-rw-r--r--  arch/m68k/include/asm/mmzone.h   | 10
-rw-r--r--  arch/m68k/include/asm/page.h     |  2
-rw-r--r--  arch/m68k/include/asm/page_mm.h  | 35
-rw-r--r--  arch/m68k/include/asm/tlbflush.h |  2
-rw-r--r--  arch/m68k/kernel/sys_m68k.c      |  4
-rw-r--r--  arch/m68k/mm/init.c              | 20
7 files changed, 4 insertions, 79 deletions
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index f4d23977d2a5..29e946394fdb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -408,10 +408,6 @@ config SINGLE_MEMORY_CHUNK
order" to save memory that could be wasted for unused memory map.
Say N if not sure.
-config ARCH_DISCONTIGMEM_ENABLE
- depends on BROKEN
- def_bool MMU && !SINGLE_MEMORY_CHUNK
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
@@ -451,11 +447,6 @@ config M68K_L2_CACHE
depends on MAC
default y
-config NODES_SHIFT
- int
- default "3"
- depends on DISCONTIGMEM
-
config CPU_HAS_NO_BITFIELDS
bool
@@ -553,4 +544,3 @@ config CACHE_COPYBACK
The ColdFire CPU cache is set into Copy-back mode.
endchoice
endif
-
diff --git a/arch/m68k/include/asm/mmzone.h b/arch/m68k/include/asm/mmzone.h
deleted file mode 100644
index 64573fe8e60d..000000000000
--- a/arch/m68k/include/asm/mmzone.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_MMZONE_H_
-#define _ASM_M68K_MMZONE_H_
-
-extern pg_data_t pg_data_map[];
-
-#define NODE_DATA(nid) (&pg_data_map[nid])
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-
-#endif /* _ASM_M68K_MMZONE_H_ */
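With asm/mmzone.h deleted, m68k picks up NODE_DATA() from the generic non-NUMA
definition in include/linux/mmzone.h, where a single statically allocated node
replaces the per-architecture pg_data_map[] table. A minimal sketch of that
fallback, quoted from memory of the generic header rather than from this diff:

    /* Generic !NUMA fallback (sketch): one global node descriptor,
     * so NODE_DATA() no longer needs an arch-provided array. */
    extern struct pglist_data contig_page_data;
    #define NODE_DATA(nid)		(&contig_page_data)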
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 97087dd3ca6d..2f1c54e4725d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,7 +62,7 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
-#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM)
+#ifndef CONFIG_MMU
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#endif
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 2411ea9ef578..a5b459bcb7d8 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -126,26 +126,6 @@ static inline void *__va(unsigned long x)
extern int m68k_virt_to_node_shift;
-#ifndef CONFIG_DISCONTIGMEM
-#define __virt_to_node(addr) (&pg_data_map[0])
-#else
-extern struct pglist_data *pg_data_table[];
-
-static inline __attribute_const__ int __virt_to_node_shift(void)
-{
- int shift;
-
- asm (
- "1: moveq #0,%0\n"
- m68k_fixup(%c1, 1b)
- : "=d" (shift)
- : "i" (m68k_fixup_vnode_shift));
- return shift;
-}
-
-#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
-#endif
-
#define virt_to_page(addr) ({ \
pfn_to_page(virt_to_pfn(addr)); \
})
@@ -153,23 +133,8 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pfn_to_virt(page_to_pfn(page)); \
})
-#ifdef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) ({ \
- unsigned long __pfn = (pfn); \
- struct pglist_data *pgdat; \
- pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
- pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
-})
-#define page_to_pfn(_page) ({ \
- const struct page *__p = (_page); \
- struct pglist_data *pgdat; \
- pgdat = &pg_data_map[page_to_nid(__p)]; \
- ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
-})
-#else
#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
-#endif
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
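With the DISCONTIGMEM pfn/page converters removed, the MMU build now defines
ARCH_PFN_OFFSET and includes asm-generic/memory_model.h, whose FLATMEM variant
reduces the conversion to plain offset arithmetic into the global mem_map.
Roughly, as a sketch from memory of the generic header (not part of this diff):

    /* FLATMEM memory model (sketch): pfn <-> struct page is an offset
     * into mem_map, biased by ARCH_PFN_OFFSET, i.e. the PFN of the
     * first memory chunk (m68k_memory[0].addr >> PAGE_SHIFT above). */
    #define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
    #define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)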
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index 5337bc2c262f..a6318ccd308f 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -263,7 +263,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr
BUG();
}
-static inline void flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
BUG();
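The tlbflush.h hunk only corrects a stale prototype on this BUG() stub:
flush_tlb_range() is expected tree-wide to take the VMA rather than the mm,
so core mm callers compile against a consistent signature. For illustration
only (a hypothetical standalone declaration, not taken from this diff):

    /* The shape generic mm code calls; the mm, when needed, is
     * reached through vma->vm_mm rather than passed directly. */
    void flush_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end);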
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index f55bdcb8e4f1..bd0274c7592e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -402,8 +402,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* to this process.
*/
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, addr);
- if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+ vma = vma_lookup(current->mm, addr);
+ if (!vma || addr + len > vma->vm_end)
goto out_unlock;
}
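sys_cacheflush() switches to vma_lookup(), a helper added in this cycle's mm
updates that only returns a VMA actually containing addr, so the explicit
addr < vma->vm_start test can be dropped and the caller keeps just its
end-of-range check. A sketch of the helper's behaviour, paraphrased from
memory of include/linux/mm.h at the time:

    /* vma_lookup() behaviour (sketch): find_vma() returns the first VMA
     * ending above addr, which may also start above addr; vma_lookup()
     * filters that case out and returns NULL instead. */
    static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
						    unsigned long addr)
    {
    	struct vm_area_struct *vma = find_vma(mm, addr);

    	if (vma && addr < vma->vm_start)
    		vma = NULL;
    	return vma;
    }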
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 1759ab875d47..5d749e188246 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -44,28 +44,8 @@ EXPORT_SYMBOL(empty_zero_page);
int m68k_virt_to_node_shift;
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
void __init m68k_setup_node(int node)
{
-#ifdef CONFIG_DISCONTIGMEM
- struct m68k_mem_info *info = m68k_memory + node;
- int i, end;
-
- i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
- end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
- for (; i <= end; i++) {
- if (pg_data_table[i])
- pr_warn("overlap at %u for chunk %u\n", i, node);
- pg_data_table[i] = pg_data_map + node;
- }
-#endif
node_set_online(node);
}
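The init.c hunk deletes the pg_data_map[]/pg_data_table[] bookkeeping, so
after this patch m68k_setup_node() is reduced to marking the node online.
The post-patch function, as left by the hunk above:

    /* Net result of the hunk: with DISCONTIGMEM gone there is no
     * per-chunk node table to fill in any more. */
    void __init m68k_setup_node(int node)
    {
    	node_set_online(node);
    }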