author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-25 15:34:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-25 15:34:28 -0700
commit     f01bfc977e26d010bbd6eccdd687340548f191b3 (patch)
tree       75da7da52e34239ebaad37e618b13748425f59c7
parent     dd5957b78f9f17c3d4018dcff21dbae9a4486128 (diff)
parent     92a56555bd576c61b27a5cab9f38a33a1e9a1df5 (diff)
Merge tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client fixes from Trond Myklebust:
 "Highlights:

   - more fixes for read/write codepath regressions
     * sleeping while holding the inode lock
     * stricter enforcement of page contiguity when coalescing requests
     * fix up error handling in the page coalescing code

   - don't busy wait on SIGKILL in the file locking code"

* tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait
  nfs: can_coalesce_requests must enforce contiguity
  nfs: disallow duplicate pages in pgio page vectors
  nfs: don't sleep with inode lock in lock_and_join_requests
  nfs: fix error handling in lock_and_join_requests
  nfs: use blocking page_group_lock in add_request
  nfs: fix nonblocking calls to nfs_page_group_lock
  nfs: change nfs_page_group_lock argument
-rw-r--r--  fs/nfs/pagelist.c          |  84
-rw-r--r--  fs/nfs/write.c             |  21
-rw-r--r--  include/linux/nfs_page.h   |   1
3 files changed, 77 insertions(+), 29 deletions(-)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ba491926df5f..be7cbce6e4c7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
if (atomic_read(&c->io_count) == 0)
break;
ret = nfs_wait_bit_killable(&q.key);
- } while (atomic_read(&c->io_count) != 0);
+ } while (atomic_read(&c->io_count) != 0 && !ret);
finish_wait(wq, &q.wait);
return ret;
}
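
The hunk above is the whole SIGKILL fix: the killable wait's return value now takes part in the loop condition, so delivery of a fatal signal ends the wait instead of looping straight back into it. Below is a minimal userspace sketch of the same loop shape, assuming a hypothetical wait_killable() helper and an io_count counter (illustrative names only, not kernel API):

#include <errno.h>
#include <stdatomic.h>

static atomic_int io_count;

/* Stand-in for nfs_wait_bit_killable(): here it simply reports that a
 * fatal signal arrived, so the example terminates.  Hypothetical helper. */
static int wait_killable(void)
{
	return -EINTR;
}

int iocounter_wait(void)
{
	int ret = 0;

	do {
		if (atomic_load(&io_count) == 0)
			break;
		ret = wait_killable();
		/* Before the fix the condition ignored ret, so a fatal signal
		 * led to a busy loop; now any non-zero ret exits as well. */
	} while (atomic_load(&io_count) != 0 && !ret);

	return ret;
}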
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
/*
* nfs_page_group_lock - lock the head of the page group
* @req - request in group that is to be locked
+ * @nonblock - if true don't block waiting for lock
*
* this lock must be held if modifying the page group list
*
- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
+ * result from wait_on_bit_lock
+ *
+ * NOTE: calling with nonblock=false should always have set the
+ * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
+ * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
*/
int
-nfs_page_group_lock(struct nfs_page *req, bool wait)
+nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
struct nfs_page *head = req->wb_head;
- int ret;
WARN_ON_ONCE(head != head->wb_head);
- do {
- ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
- TASK_UNINTERRUPTIBLE);
- } while (wait && ret != 0);
+ if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
+ return 0;
- WARN_ON_ONCE(ret > 0);
- return ret;
+ if (!nonblock)
+ return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+ TASK_UNINTERRUPTIBLE);
+
+ return -EAGAIN;
+}
+
+/*
+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
+ * @req - a request in the group
+ *
+ * This is a blocking call to wait for the group lock to be cleared.
+ */
+void
+nfs_page_group_lock_wait(struct nfs_page *req)
+{
+ struct nfs_page *head = req->wb_head;
+
+ WARN_ON_ONCE(head != head->wb_head);
+
+ wait_on_bit(&head->wb_flags, PG_HEADLOCK,
+ TASK_UNINTERRUPTIBLE);
}
/*
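
The new helpers follow a try-then-block pattern: test_and_set_bit() covers the uncontended fast path, wait_on_bit_lock() is used only when the caller may sleep, and nonblocking callers get -EAGAIN. Below is a rough userspace analogue with a pthread mutex standing in for the PG_HEADLOCK bit; group_lock, group_unlock and group_lock_wait are illustrative names, not kernel functions:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of nfs_page_group_lock(): 0 on success,
 * -EAGAIN when nonblock is set and the lock is contended. */
int group_lock(bool nonblock)
{
	if (pthread_mutex_trylock(&head_lock) == 0)	/* fast path */
		return 0;

	if (!nonblock) {
		pthread_mutex_lock(&head_lock);		/* sleep until free */
		return 0;
	}

	return -EAGAIN;
}

void group_unlock(void)
{
	pthread_mutex_unlock(&head_lock);
}

/* Analogue of nfs_page_group_lock_wait(): wait for the current holder
 * to release, without keeping the lock afterwards. */
void group_lock_wait(void)
{
	pthread_mutex_lock(&head_lock);
	pthread_mutex_unlock(&head_lock);
}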
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
bool ret;
- nfs_page_group_lock(req, true);
+ nfs_page_group_lock(req, false);
ret = nfs_page_group_sync_on_bit_locked(req, bit);
nfs_page_group_unlock(req);
@@ -701,10 +724,11 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
- struct page **pages;
+ struct page **pages,
+ *last_page;
struct list_head *head = &desc->pg_list;
struct nfs_commit_info cinfo;
- unsigned int pagecount;
+ unsigned int pagecount, pageused;
pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count);
if (!nfs_pgarray_set(&hdr->page_array, pagecount))
@@ -712,12 +736,23 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
pages = hdr->page_array.pagevec;
+ last_page = NULL;
+ pageused = 0;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
- *pages++ = req->wb_page;
+
+ if (WARN_ON_ONCE(pageused >= pagecount))
+ return nfs_pgio_error(desc, hdr);
+
+ if (!last_page || last_page != req->wb_page) {
+ *pages++ = last_page = req->wb_page;
+ pageused++;
+ }
}
+ if (WARN_ON_ONCE(pageused != pagecount))
+ return nfs_pgio_error(desc, hdr);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
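
Because the requests walked here are ordered and sub-page requests sharing a page arrive back to back, comparing against the previously stored page is enough to keep duplicates out of the page vector, and the WARN_ON_ONCE checks catch any mismatch between the counted and the used slots. A hedged sketch of the same dedup-while-copying idea over a plain array follows; struct req and fill_page_vector are illustrative stand-ins, not the kernel structures:

#include <stddef.h>

struct req { void *page; };

/* Copy each request's page pointer into pages[], skipping a request whose
 * page matches the one just stored (sub-page requests sharing a page).
 * Returns the number of distinct pages written, or -1 on overflow. */
static int fill_page_vector(const struct req *reqs, size_t nreqs,
			    void **pages, size_t pagecount)
{
	void *last_page = NULL;
	size_t used = 0;

	for (size_t i = 0; i < nreqs; i++) {
		if (last_page && last_page == reqs[i].page)
			continue;		/* duplicate of previous page */
		if (used >= pagecount)
			return -1;		/* would overflow the vector */
		pages[used++] = last_page = reqs[i].page;
	}
	return (int)used;
}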
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
return false;
if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
return false;
+ if (req->wb_page == prev->wb_page) {
+ if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
+ return false;
+ } else {
+ if (req->wb_pgbase != 0 ||
+ prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+ return false;
+ }
}
size = pgio->pg_ops->pg_test(pgio, prev, req);
WARN_ON_ONCE(size > req->wb_bytes);
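
Spelled out, the new rule is: requests on the same page must be byte-adjacent within that page, while requests on different pages may only coalesce if the earlier one ends exactly on a page boundary and the later one starts at offset 0. Here is a standalone version of just that check, assuming a 4 KiB page size and illustrative field names:

#include <stdbool.h>

#define PAGE_SIZE 4096u

struct io_req {
	const void  *page;	/* backing page */
	unsigned int pgbase;	/* offset of the data within the page */
	unsigned int bytes;	/* length of the data */
};

/* True only when req picks up exactly where prev left off. */
static bool reqs_contiguous(const struct io_req *prev, const struct io_req *req)
{
	if (req->page == prev->page)
		/* same page: must be byte-adjacent within the page */
		return req->pgbase == prev->pgbase + prev->bytes;

	/* different pages: prev must end on a page boundary and
	 * req must start at the beginning of its page */
	return req->pgbase == 0 &&
	       prev->pgbase + prev->bytes == PAGE_SIZE;
}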
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *subreq;
unsigned int bytes_left = 0;
unsigned int offset, pgbase;
- int ret;
- ret = nfs_page_group_lock(req, false);
- if (ret < 0) {
- desc->pg_error = ret;
- return 0;
- }
+ nfs_page_group_lock(req, false);
subreq = req;
bytes_left = subreq->wb_bytes;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
if (desc->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
- ret = nfs_page_group_lock(req, false);
- if (ret < 0) {
- desc->pg_error = ret;
- return 0;
- }
+ nfs_page_group_lock(req, false);
continue;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e3b5cf28bdc5..175d5d073ccf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -241,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
unsigned int pos = 0;
unsigned int len = nfs_page_length(req->wb_page);
- nfs_page_group_lock(req, true);
+ nfs_page_group_lock(req, false);
do {
tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -478,10 +478,23 @@ try_again:
return NULL;
}
- /* lock each request in the page group */
- ret = nfs_page_group_lock(head, false);
- if (ret < 0)
+ /* holding inode lock, so always make a non-blocking call to try the
+ * page group lock */
+ ret = nfs_page_group_lock(head, true);
+ if (ret < 0) {
+ spin_unlock(&inode->i_lock);
+
+ if (!nonblock && ret == -EAGAIN) {
+ nfs_page_group_lock_wait(head);
+ nfs_release_request(head);
+ goto try_again;
+ }
+
+ nfs_release_request(head);
return ERR_PTR(ret);
+ }
+
+ /* lock each request in the page group */
subreq = head;
do {
/*
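
The reworked path is the familiar "trylock under a spinlock; on contention drop the spinlock, wait, and retry" dance, which is what keeps the task from sleeping on the group lock while inode->i_lock is held. A rough userspace analogue with two mutexes standing in for i_lock and the page group lock (all names illustrative, error paths omitted):

#include <pthread.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER; /* i_lock stand-in */
static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER; /* page group lock stand-in */

/* Take both locks without ever sleeping on group_lock while inode_lock
 * is held, mirroring the try_again loop in nfs_lock_and_join_requests. */
void lock_and_join(void)
{
try_again:
	pthread_mutex_lock(&inode_lock);

	if (pthread_mutex_trylock(&group_lock) != 0) {
		/* Contended: drop the spinlock stand-in first, then block. */
		pthread_mutex_unlock(&inode_lock);
		pthread_mutex_lock(&group_lock);	/* wait for the holder */
		pthread_mutex_unlock(&group_lock);	/* ...but don't keep it */
		goto try_again;
	}

	/* ... work with both locks held ... */

	pthread_mutex_unlock(&group_lock);
	pthread_mutex_unlock(&inode_lock);
}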
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 6ad2bbcad405..6c3e06ee2fb7 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -123,6 +123,7 @@ extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);