author	David Hildenbrand <david@redhat.com>	2023-01-13 18:10:26 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:11 -0800
commit	950fe885a89770619e315f9b46301eebf0aab7b3 (patch)
tree	e4f14a15fa22f1be3376ad771f26dbc6cfdf1d35 /mm
parent	f5c3fe300c5b40ff9af5ce2c9dd9897e91ce5735 (diff)
mm: remove __HAVE_ARCH_PTE_SWP_EXCLUSIVE
__HAVE_ARCH_PTE_SWP_EXCLUSIVE is now supported by all architectures that
support swap PTEs, so let's drop it.

Link: https://lkml.kernel.org/r/20230113171026.582290-27-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
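For context, the exclusive-marker helpers that generic code can now call
unconditionally look roughly like this on each architecture. This is a sketch
modeled on the x86 variant; the flag name and pte accessors are
arch-specific, and what this patch removes is the need for each architecture
to also advertise them via #define __HAVE_ARCH_PTE_SWP_EXCLUSIVE:

	/* Sketch of a per-arch implementation (modeled on x86; the bit
	 * chosen for _PAGE_SWP_EXCLUSIVE and the pte flag accessors
	 * differ between architectures). */
	static inline pte_t pte_swp_mkexclusive(pte_t pte)
	{
		return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
	}

	static inline int pte_swp_exclusive(pte_t pte)
	{
		return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
	}

	static inline pte_t pte_swp_clear_exclusive(pte_t pte)
	{
		return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
	}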
Diffstat (limited to 'mm')
-rw-r--r--	mm/debug_vm_pgtable.c	2
-rw-r--r--	mm/memory.c	4
-rw-r--r--	mm/rmap.c	11
3 files changed, 0 insertions, 17 deletions
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index ff8d6f6af896..af59cc7bd307 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -810,7 +810,6 @@ static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) {
 
 static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
 {
-#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
 	unsigned long max_swap_offset;
 	swp_entry_t entry, entry2;
 	pte_t pte;
@@ -841,7 +840,6 @@ static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
 	WARN_ON(!is_swap_pte(pte));
 	entry2 = pte_to_swp_entry(pte);
 	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
-#endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */
 }
 
 static void __init pte_swap_tests(struct pgtable_debug_args *args)
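The test that moves out from behind the ifdef checks a simple round-trip
invariant: toggling the exclusive bit must not corrupt the swap type/offset
encoded in the PTE. A condensed sketch of that invariant (paraphrased, not
the verbatim test; the type/offset values are arbitrary):

	swp_entry_t entry, entry2;
	pte_t pte;

	entry = swp_entry(1, 100);		/* arbitrary swap type/offset */
	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));	/* bit must start clear */

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));	/* bit is set ... */
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));	/* ... entry intact */

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));	/* and clears again */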
diff --git a/mm/memory.c b/mm/memory.c
index c6bacd58d032..87b33b4967c2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3864,10 +3864,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * the swap entry concurrently) for certainly exclusive pages.
 	 */
 	if (!folio_test_ksm(folio)) {
-		/*
-		 * Note that pte_swp_exclusive() == false for architectures
-		 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
-		 */
 		exclusive = pte_swp_exclusive(vmf->orig_pte);
 		if (folio != swapcache) {
 			/*
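With the architecture fallback gone, do_swap_page() can take
pte_swp_exclusive() at face value on every architecture. A simplified sketch
of what the flag buys on swapin (paraphrased, not the verbatim kernel logic):

	/* Simplified sketch: an exclusive, non-KSM page cannot have been
	 * pinned or mapped elsewhere while it was swapped out, so a write
	 * fault can map it writable right away instead of taking another
	 * copy-on-write fault later. */
	if (!folio_test_ksm(folio) &&
	    (exclusive || folio_ref_count(folio) == 1)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
			vmf->flags &= ~FAULT_FLAG_WRITE;
		}
	}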
diff --git a/mm/rmap.c b/mm/rmap.c
index 073999f78adf..0d07c500fc86 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1710,17 +1710,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
-			/*
-			 * Note: We *don't* remember if the page was mapped
-			 * exclusively in the swap pte if the architecture
-			 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
-			 * that case, swapin code has to re-determine that
-			 * manually and might detect the page as possibly
-			 * shared, for example, if there are other references on
-			 * the page or if the page is under writeback. We made
-			 * sure that there are no GUP pins on the page that
-			 * would rely on it, so for GUP pins this is fine.
-			 */
 			if (list_empty(&mm->mmlist)) {
 				spin_lock(&mmlist_lock);
 				if (list_empty(&mm->mmlist))
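The deleted comment is obsolete because the counterpart that records
exclusivity now runs on every architecture. A sketch of where the bit gets
set further down the swap-out path of try_to_unmap_one() (paraphrased, not
part of this hunk):

	/* Paraphrased: if the anonymous page was exclusively mapped,
	 * remember that in the swap PTE so that swapin can trust
	 * pte_swp_exclusive() instead of re-deriving exclusivity. */
	swp_pte = swp_entry_to_pte(entry);
	if (anon_exclusive)
		swp_pte = pte_swp_mkexclusive(swp_pte);
	if (pte_soft_dirty(pteval))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	set_pte_at(mm, address, pvmw.pte, swp_pte);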