summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorAndreas Gruenbacher <agruenba@redhat.com>2022-05-05 13:32:23 +0200
committerAndreas Gruenbacher <agruenba@redhat.com>2022-05-13 22:00:22 +0200
commit324d116c5a5c8204dc00e63f725a3c5ed09afb53 (patch)
tree0583a2e6757d92923fb4062e5c79ad245dc4febb /fs
parent72382264502d9348ead372f82ecc3044de5c82d2 (diff)
downloadlinux-stable-324d116c5a5c8204dc00e63f725a3c5ed09afb53.tar.gz
linux-stable-324d116c5a5c8204dc00e63f725a3c5ed09afb53.tar.bz2
linux-stable-324d116c5a5c8204dc00e63f725a3c5ed09afb53.zip
gfs2: Align read and write chunks to the page cache
Align the chunks that reads and writes are carried out in to the page cache rather than the user buffers. This will be more efficient in general, especially for allocating writes. Optimizing the case that the user buffer is gfs2 backed isn't very useful; we only need to make sure we won't deadlock. Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/gfs2/file.c15
1 file changed, 7 insertions, 8 deletions
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ea87bef7314d..11c46407d4a8 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -771,6 +771,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
}
static inline bool should_fault_in_pages(struct iov_iter *i,
+ struct kiocb *iocb,
size_t *prev_count,
size_t *window_size)
{
@@ -783,15 +784,13 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
return false;
size = PAGE_SIZE;
- offs = offset_in_page(i->iov[0].iov_base + i->iov_offset);
+ offs = offset_in_page(iocb->ki_pos);
if (*prev_count != count || !*window_size) {
size_t nr_dirtied;
- size = ALIGN(offs + count, PAGE_SIZE);
- size = min_t(size_t, size, SZ_1M);
nr_dirtied = max(current->nr_dirtied_pause -
current->nr_dirtied, 8);
- size = min(size, nr_dirtied << PAGE_SHIFT);
+ size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
}
*prev_count = count;
@@ -845,7 +844,7 @@ retry_under_glock:
if (ret > 0)
read = ret;
- if (should_fault_in_pages(to, &prev_count, &window_size)) {
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
gfs2_holder_allow_demote(gh);
window_size -= fault_in_iov_iter_writeable(to, window_size);
gfs2_holder_disallow_demote(gh);
@@ -916,7 +915,7 @@ retry_under_glock:
if (ret > 0)
written = ret;
- if (should_fault_in_pages(from, &prev_count, &window_size)) {
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_holder_allow_demote(gh);
window_size -= fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh);
@@ -984,7 +983,7 @@ retry_under_glock:
if (ret > 0)
read += ret;
- if (should_fault_in_pages(to, &prev_count, &window_size)) {
+ if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
gfs2_holder_allow_demote(&gh);
window_size -= fault_in_iov_iter_writeable(to, window_size);
gfs2_holder_disallow_demote(&gh);
@@ -1061,7 +1060,7 @@ retry_under_glock:
goto out_unlock;
from->count = orig_count - written;
- if (should_fault_in_pages(from, &prev_count, &window_size)) {
+ if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_holder_allow_demote(gh);
window_size -= fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh);