author		Linus Torvalds <torvalds@linux-foundation.org>	2022-11-15 14:56:23 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-11-15 14:56:23 -0800
commit		59d0d52c30d4991ac4b329f049cc37118e00f5b0 (patch)
tree		255a538cf6b539c7f9d20b5e4d1a16dd9ae60ba7 /fs
parent		81e7cfa3a9eb4ba6993a9c71772fdab21bc5d870 (diff)
parent		5e51c627c5acbcf82bb552e17533a79d2a6a2600 (diff)
Merge tag 'netfs-fixes-20221115' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
Pull netfs fixes from David Howells:
 "Two fixes, affecting the functions that iterate over the pagecache
  unmarking or unlocking pages after an op is complete:

   - xas_for_each() loops must call xas_retry() first thing and
     immediately do a "continue" in the case that the extracted value
     is a special value that indicates that the walk raced with a
     modification. Fix the unlock and unmark loops to do this.

   - The maths in the unlock loop is dodgy as it could, theoretically,
     at some point in the future end up with a starting file pointer
     that is in the middle of a folio. This will cause a subtraction to
     go negative - but the number is unsigned. Fix the maths to use
     absolute file positions instead of relative page indices"

* tag 'netfs-fixes-20221115' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  netfs: Fix dodgy maths
  netfs: Fix missing xas_retry() calls in xarray iteration
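For reference, the iteration pattern the first fix mandates looks roughly like this (a minimal sketch only, not the kernel's exact code; mapping, start_page and last_page stand in for whatever the caller derives from the request):

	XA_STATE(xas, &mapping->i_pages, start_page);
	struct folio *folio;

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		/* The walk can race with a store and hand back a special
		 * retry entry instead of a folio; check for that before
		 * touching anything else.
		 */
		if (xas_retry(&xas, folio))
			continue;

		/* ... safe to operate on the folio here ... */
	}
	rcu_read_unlock();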
Diffstat (limited to 'fs')
-rw-r--r--	fs/netfs/buffered_read.c	20 +++++++++++++-------
-rw-r--r--	fs/netfs/io.c			 3 +++
2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 0ce535852151..7679a68e8193 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -17,9 +17,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
 	struct folio *folio;
-	unsigned int iopos, account = 0;
 	pgoff_t start_page = rreq->start / PAGE_SIZE;
 	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	size_t account = 0;
 	bool subreq_failed = false;
 
 	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -39,18 +39,23 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 	 */
 	subreq = list_first_entry(&rreq->subrequests,
 				  struct netfs_io_subrequest, rreq_link);
-	iopos = 0;
 	subreq_failed = (subreq->error < 0);
 
 	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_page) {
-		unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-		unsigned int pgend = pgpos + folio_size(folio);
+		loff_t pg_end;
 		bool pg_failed = false;
 
+		if (xas_retry(&xas, folio))
+			continue;
+
+		pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
 		for (;;) {
+			loff_t sreq_end;
+
 			if (!subreq) {
 				pg_failed = true;
 				break;
 			}
@@ -58,11 +63,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
 				folio_start_fscache(folio);
 			pg_failed |= subreq_failed;
-			if (pgend < iopos + subreq->len)
+			sreq_end = subreq->start + subreq->len - 1;
+			if (pg_end < sreq_end)
 				break;
 
 			account += subreq->transferred;
-			iopos += subreq->len;
 			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
 				subreq = list_next_entry(subreq, rreq_link);
 				subreq_failed = (subreq->error < 0);
@@ -70,7 +75,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				subreq = NULL;
 				subreq_failed = false;
 			}
-			if (pgend == iopos)
+
+			if (pg_end == sreq_end)
 				break;
 		}
 
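The "dodgy maths" replaced above sits in the removed line: with pgpos computed as (folio_index(folio) - start_page) * PAGE_SIZE in an unsigned type, a request that ever starts in the middle of a folio makes folio_index(folio) smaller than start_page, and the subtraction wraps instead of going negative. A standalone illustration of the wraparound (hypothetical userspace values, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long folio_index = 4;	/* folio begins at page 4 */
		unsigned long start_page  = 5;	/* request begins mid-folio, at page 5 */

		/* Mirrors the removed expression; the subtraction wraps to
		 * ULONG_MAX rather than yielding -1.
		 */
		unsigned int pgpos = (folio_index - start_page) * 4096;

		printf("pgpos = %u\n", pgpos);	/* prints 4294963200, not -4096 */
		return 0;
	}

Switching to absolute byte positions (folio_pos() and subreq->start are both loff_t file offsets) sidesteps the relative arithmetic entirely.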
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 428925899282..e374767d1b68 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -121,6 +121,9 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
 	XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
 
 	xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+		if (xas_retry(&xas, folio))
+			continue;
+
 		/* We might have multiple writes from the same huge
 		 * folio, but we mustn't unlock a folio more than once.
 		 */