path: root/fs/ubifs/file.c
author      Hyunchul Lee <cheol.lee@lge.com>    2017-06-14 09:31:49 +0900
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-02-14 16:31:00 -0500
commit      9d9f703d4f7d4591585c712678a3e0b2a3902f33 (patch)
tree        163ceb9e6e242c24b2f6efab48f519c238fca38f /fs/ubifs/file.c
parent      1eeb10c067dd2f67da642b50e44bec252fd28638 (diff)
ubifs: Change gfp flags in page allocation for bulk read
[ Upstream commit 480a1a6a3ef6fb6be4cd2f37b34314fbf64867dd ]

In low memory situations, page allocations for bulk read can kill
applications while reclaiming memory, and print a failure message when an
allocation fails. Because bulk read is just an optimization, we do not have
to do either and can simply stop allocating pages. Although this situation
happens rarely, add __GFP_NORETRY to prevent excessive memory reclaim and
the killing of applications, and __GFP_NOWARN to suppress the failure
message. To do this, use readahead_gfp_mask for the gfp flags when
allocating pages.

Signed-off-by: Hyunchul Lee <cheol.lee@lge.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Sasha Levin <sashal@kernel.org>
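Editorial note, a minimal sketch rather than part of the patch: in kernels of
this era, readahead_gfp_mask() (include/linux/pagemap.h) expands to roughly
mapping_gfp_mask(mapping) | __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN, so the
flags named above arrive through that helper, and the patch additionally
clears __GFP_FS to keep the GFP_NOFS restriction of the old call. Assuming
that context inside ubifs_do_bulk_read(), the resulting allocation looks
roughly like this:

	/*
	 * Sketch only: bulk-read page allocation with the new gfp mask.
	 * ~__GFP_FS      keeps the old GFP_NOFS behaviour, so reclaim cannot
	 *                re-enter filesystem code from this path.
	 * __GFP_NORETRY  gives up instead of reclaiming hard or triggering
	 *                the OOM killer.
	 * __GFP_NOWARN   suppresses the allocation-failure message.
	 */
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
	if (!page)
		break;	/* bulk read is only an optimization; stop quietly */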
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--  fs/ubifs/file.c | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b4fbeefba246..f2e6162f8e65 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -721,6 +721,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 	int err, page_idx, page_cnt, ret = 0, n = 0;
 	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;
+	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
 
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
@@ -782,8 +783,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 		if (page_offset > end_index)
 			break;
 
-		page = find_or_create_page(mapping, page_offset,
-					   GFP_NOFS | __GFP_COLD);
+		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
 		if (!page)
 			break;
 		if (!PageUptodate(page))