summaryrefslogtreecommitdiffstats
path: root/fs/bcachefs/io_read.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-12-20 02:38:10 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2024-01-01 11:47:42 -0500
commit1ad36a010c698fde9148b8e2abf2303e3d6fd001 (patch)
tree6877251fbda05660d96eecda6b1c1017afa1c570 /fs/bcachefs/io_read.c
parent920388254f613410324a10695b43485a8d9f838a (diff)
downloadlinux-1ad36a010c698fde9148b8e2abf2303e3d6fd001.tar.gz
linux-1ad36a010c698fde9148b8e2abf2303e3d6fd001.tar.bz2
linux-1ad36a010c698fde9148b8e2abf2303e3d6fd001.zip
bcachefs: Use GFP_KERNEL for promote allocations
We already have btree locks dropped here - no need for GFP_NOFS.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/io_read.c')
-rw-r--r--  fs/bcachefs/io_read.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 4c9eaf7cea8d..88aa004eade8 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -174,7 +174,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
return NULL;
- op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
+ op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_KERNEL);
if (!op)
goto err;
@@ -187,7 +187,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
*/
*rbio = kzalloc(sizeof(struct bch_read_bio) +
sizeof(struct bio_vec) * pages,
- GFP_NOFS);
+ GFP_KERNEL);
if (!*rbio)
goto err;
@@ -195,7 +195,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
- GFP_NOFS))
+ GFP_KERNEL))
goto err;
(*rbio)->bounce = true;