author		Vitaly Wool <vitalywool@gmail.com>	2018-04-05 16:23:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-05 21:36:25 -0700
commit		5c9bab592f53328b3e00d7293378b714abf47a2a (patch)
tree		8785d51da7ca697c8a781e56d309f341e3bb13a3
parent		605ca5ede7643a01f4c4a15913f9714ac297f8a6 (diff)
z3fold: limit use of stale list for allocation
Currently, if z3fold cannot find an unbuddied page, it first tries to pull a
page off the stale list.  The problem with this approach is that we cannot
guarantee the page is not being processed by the workqueue thread at the
same time unless we run cancel_work_sync() on it, which we cannot do if
we're in an atomic context.  So let's just limit stale list usage to
non-atomic contexts only.

Link: http://lkml.kernel.org/r/47ab51e7-e9c1-d30e-ab17-f734dbc3abce@gmail.com
Signed-off-by: Vitaly Vul <vitaly.vul@sony.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: <Oleksiy.Avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
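To see the pattern outside the kernel, here is a minimal, hypothetical
userspace sketch of the same logic; stale_list_pop(), wait_for_pending_work()
and get_object() are illustrative stand-ins, not kernel APIs.  It mirrors the
patched flow: the stale cache is consulted only when the caller may block,
because reusing a cached object requires a blocking wait for its pending
work, and a fresh allocation is the fallback otherwise.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct object { int id; };

/* Illustrative stand-ins for the kernel primitives involved. */
static struct object *stale_list_pop(void)
{
        return NULL;                    /* pretend the stale list is empty */
}

static void wait_for_pending_work(struct object *o)
{
        (void)o;                        /* may block, like cancel_work_sync() */
}

static struct object *get_object(bool can_sleep)
{
        struct object *o = NULL;

        if (can_sleep) {
                /* The reuse path needs a blocking wait, so it is entered
                 * only when the caller is allowed to sleep. */
                o = stale_list_pop();
                if (o)
                        wait_for_pending_work(o);
        }
        if (!o)                         /* atomic caller, or cache empty */
                o = malloc(sizeof(*o));
        return o;
}

int main(void)
{
        struct object *o = get_object(false);   /* an "atomic" caller */

        if (!o)
                return 1;               /* mirrors the -ENOMEM path */
        printf("allocated a fresh object at %p\n", (void *)o);
        free(o);
        return 0;
}

In z3fold itself, can_sleep is derived from the caller's gfp mask via
gfpflags_allow_blocking(), so GFP_ATOMIC callers automatically skip the
stale list and take the alloc_page() fallback.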
-rw-r--r--	mm/z3fold.c	35	+++++++++++++++++++----------------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index d589d318727f..f579ad4a8100 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -620,24 +620,27 @@ lookup:
                 bud = FIRST;
         }
 
-        spin_lock(&pool->stale_lock);
-        zhdr = list_first_entry_or_null(&pool->stale,
-                                        struct z3fold_header, buddy);
-        /*
-         * Before allocating a page, let's see if we can take one from the
-         * stale pages list. cancel_work_sync() can sleep so we must make
-         * sure it won't be called in case we're in atomic context.
-         */
-        if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
-                list_del(&zhdr->buddy);
-                spin_unlock(&pool->stale_lock);
-                if (can_sleep)
+        page = NULL;
+        if (can_sleep) {
+                spin_lock(&pool->stale_lock);
+                zhdr = list_first_entry_or_null(&pool->stale,
+                                                struct z3fold_header, buddy);
+                /*
+                 * Before allocating a page, let's see if we can take one from
+                 * the stale pages list. cancel_work_sync() can sleep so we
+                 * limit this case to the contexts where we can sleep
+                 */
+                if (zhdr) {
+                        list_del(&zhdr->buddy);
+                        spin_unlock(&pool->stale_lock);
                         cancel_work_sync(&zhdr->work);
-                page = virt_to_page(zhdr);
-        } else {
-                spin_unlock(&pool->stale_lock);
-                page = alloc_page(gfp);
+                        page = virt_to_page(zhdr);
+                } else {
+                        spin_unlock(&pool->stale_lock);
+                }
         }
+        if (!page)
+                page = alloc_page(gfp);
         if (!page)
                 return -ENOMEM;