author     Linus Torvalds <torvalds@linux-foundation.org>  2022-04-08 14:31:41 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-04-08 14:31:41 -1000
commit     911b2b95168c7790ed5ea2703d804086c03088df (patch)
tree       221e4fa69720af910218cb676bf71d99ec4dedae /mm
parent     1a3b1bba7c7a5eb8a11513cf88427cb9d77bc60a (diff)
parent     4071a1b9e24ee394b7492bff7542707ee9ad986d (diff)
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
"9 patches.
Subsystems affected by this patch series: mm (migration, highmem,
sparsemem, mremap, mempolicy, and memcg), lz4, mailmap, and
MAINTAINERS"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
MAINTAINERS: add Tom as clang reviewer
mm/list_lru.c: revert "mm/list_lru: optimize memcg_reparent_list_lru_node()"
mailmap: update Vasily Averin's email address
mm/mempolicy: fix mpol_new leak in shared_policy_replace
mmmremap.c: avoid pointless invalidate_range_start/end on mremap(old_size=0)
mm/sparsemem: fix 'mem_section' will never be NULL gcc 12 warning
lz4: fix LZ4_decompress_safe_partial read out of bound
highmem: fix checks in __kmap_local_sched_{in,out}
mm: migrate: use thp_order instead of HPAGE_PMD_ORDER for new page allocation.
Diffstat (limited to 'mm')
-rw-r--r--   mm/highmem.c   | 4
-rw-r--r--   mm/list_lru.c  | 6
-rw-r--r--   mm/mempolicy.c | 1
-rw-r--r--   mm/mremap.c    | 3
4 files changed, 6 insertions, 8 deletions
diff --git a/mm/highmem.c b/mm/highmem.c
index 0cc0c4da7ed9..1a692997fac4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
 
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
 
 		/* With debug all even slots are unmapped and act as guard */
 		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-			WARN_ON_ONCE(!pte_none(pteval));
+			WARN_ON_ONCE(pte_val(pteval) != 0);
 			continue;
 		}
 		if (WARN_ON_ONCE(pte_none(pteval)))
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c669d87001a6..ba76428ceece 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -395,12 +395,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
 	struct list_lru_one *src, *dst;
 
 	/*
-	 * If there is no lru entry in this nlru, we can skip it immediately.
-	 */
-	if (!READ_ONCE(nlru->nr_items))
-		return;
-
-	/*
 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
 	 * we have to use IRQ-safe primitives here to avoid deadlock.
 	 */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 649bd3be8682..8c74107a2b15 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2743,6 +2743,7 @@ alloc_new:
 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 	if (!mpol_new)
 		goto err_out;
+	atomic_set(&mpol_new->refcnt, 1);
 	goto restart;
 }
diff --git a/mm/mremap.c b/mm/mremap.c
index 9d76da79594d..303d3290b938 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	pmd_t *old_pmd, *new_pmd;
 	pud_t *old_pud, *new_pud;
 
+	if (!len)
+		return 0;
+
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
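The one-line mm/mempolicy change above initialises the reference count of the freshly allocated mpol_new, so that when the retry path ends up not using the spare object, dropping the reference actually frees it instead of leaking it. Below is a minimal userspace sketch of that pattern, with hypothetical names, plain C11 atomics and malloc standing in for the kernel's slab cache and policy helpers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a refcounted policy object. */
struct policy {
	atomic_int refcnt;
	int mode;
};

/* Allocate a policy with its counter already at 1 -- the step the
 * kernel fix adds with atomic_set(&mpol_new->refcnt, 1). */
static struct policy *policy_alloc(int mode)
{
	struct policy *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	atomic_init(&p->refcnt, 1);
	p->mode = mode;
	return p;
}

/* Drop a reference; free only when the last one goes away, the same
 * contract mpol_put() follows in the kernel. */
static void policy_put(struct policy *p)
{
	if (!p)
		return;
	if (atomic_fetch_sub(&p->refcnt, 1) == 1) {
		printf("policy (mode=%d) freed\n", p->mode);
		free(p);
	}
}

int main(void)
{
	/* The retry path allocates a spare policy it may never insert;
	 * the later put must be able to release it. */
	struct policy *spare = policy_alloc(42);

	policy_put(spare);	/* last reference: freed here */
	return 0;
}

Built with cc -std=c11, this prints the free message once. Dropping the atomic_init() call leaves the counter holding whatever the allocator returned, so the final put usually never reaches zero, which is the leak the patch closes.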