author     David S. Miller <davem@davemloft.net>	2020-05-31 17:48:46 -0700
committer  David S. Miller <davem@davemloft.net>	2020-05-31 17:48:46 -0700
commit     1806c13dc2532090d742ce03847b22367fb20ad6 (patch)
tree       7507ddebec3046173a4308e1e0dd8701cd498d0f /mm
parent     1079a34c56c535c3e27df8def0d3c5069d2de129 (diff)
parent     bdc48fa11e46f867ea4d75fa59ee87a7f48be144 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
xdp_umem.c had overlapping changes between the 64-bit math fix for the
calculation of npgs and the removal of the zerocopy memory type which
got rid of the chunk_size_nohdr member.

The mlx5 Kconfig conflict is a case where we just take the net-next
copy of the Kconfig entry dependency as it takes on the ESWITCH
dependency by one level of indirection which is what the 'net'
conflicting change is trying to ensure.

Signed-off-by: David S. Miller <davem@davemloft.net>
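Note on the npgs conflict: the 'net'-side fix mentioned above changed the page-count calculation to use 64-bit math. A minimal sketch of that pattern, using div_u64 with an explicit overflow check (illustrative only, helper name hypothetical, not the exact xdp_umem.c code):

	#include <linux/mm.h>
	#include <linux/math64.h>
	#include <linux/errno.h>

	/* Hypothetical helper: divide a u64 size by PAGE_SIZE without
	 * truncating on 32-bit architectures, and refuse values that do
	 * not fit the u32 field the result is stored into. */
	static int sketch_size_to_npgs(u64 size, u32 *npgs)
	{
		u64 pages = div_u64(size, PAGE_SIZE);

		if (pages > U32_MAX)
			return -EINVAL;

		*npgs = (u32)pages;
		return 0;
	}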
Diffstat (limited to 'mm')
-rw-r--r--  mm/khugepaged.c  1
-rw-r--r--  mm/z3fold.c      3
2 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 99d77ffb79c2..cd280afb246e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1692,6 +1692,7 @@ static void collapse_file(struct mm_struct *mm,
 		if (page_has_private(page) &&
 		    !try_to_release_page(page, GFP_KERNEL)) {
 			result = SCAN_PAGE_HAS_PRIVATE;
+			putback_lru_page(page);
 			goto out_unlock;
 		}
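The khugepaged.c hunk plugs a page leak: by the time collapse_file() reaches this check, the page has already been isolated from the LRU, so this failure exit must put it back like the other error paths do. A simplified sketch of the isolate/putback pairing involved (not the complete collapse_file() logic):

	/* Once isolate_lru_page() has succeeded, every bail-out path must
	 * call putback_lru_page(), otherwise the page stays off the LRU
	 * and is effectively leaked. */
	if (isolate_lru_page(page)) {
		result = SCAN_DEL_PAGE_LRU;
		goto out_unlock;
	}

	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL)) {
		result = SCAN_PAGE_HAS_PRIVATE;
		putback_lru_page(page);	/* undo the isolation above */
		goto out_unlock;
	}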
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 8c3bb5e508b8..460b0feced26 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -43,6 +43,7 @@
 #include <linux/spinlock.h>
 #include <linux/zpool.h>
 #include <linux/magic.h>
+#include <linux/kmemleak.h>
 
 /*
  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
@@ -215,6 +216,8 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
 			(gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
 
 	if (slots) {
+		/* It will be freed separately in free_handle(). */
+		kmemleak_not_leak(slots);
 		memset(slots->slot, 0, sizeof(slots->slot));
 		slots->pool = (unsigned long)pool;
 		rwlock_init(&slots->lock);
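The z3fold.c hunk silences a kmemleak false positive: the slots object is freed separately in free_handle(), and kmemleak apparently cannot track the reference to it in the meantime, so it would otherwise be reported as a leak. A rough sketch of how kmemleak_not_leak() is typically applied in such a situation (hypothetical helper, not the actual z3fold code):

	#include <linux/kmemleak.h>
	#include <linux/slab.h>

	/* Hypothetical helper: allocate an object whose only live reference
	 * will be kept somewhere kmemleak cannot scan.  Marking it
	 * not-a-leak suppresses the false positive; the caller still frees
	 * it explicitly on its real teardown path. */
	static void *alloc_handle_backed(size_t size, gfp_t gfp)
	{
		void *obj = kzalloc(size, gfp);

		if (obj)
			kmemleak_not_leak(obj);

		return obj;
	}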