commit e9f7bee1df223dcf83743b46cb06c08d95497ec0
tree 67beae4733ef0286645112a52623c81c8f8a19a9
parent 016eb4a0ed06a3677d67a584da901f0e9a63c666
author Trond Myklebust <Trond.Myklebust@netapp.com> 2006-09-08 09:48:54 -0700
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-09-08 10:22:51 -0700
[PATCH] NFS: large non-page-aligned direct I/O clobbers memory
The logic in nfs_direct_read_schedule and nfs_direct_write_schedule can
allow data->npages to be one larger than rpages.  This causes a page
pointer to be written beyond the end of the pagevec in nfs_read_data
(or nfs_write_data).

Fix this by making nfs_readdata_alloc() and nfs_writedata_alloc()
calculate the size of the pagevec array and initialise data->npages.

Also get rid of the redundant argument to nfs_commit_alloc().

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
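To see the off-by-one concretely: with 4 KiB pages, 32 KiB of data starting 512 bytes into a page touches nine pages, while the old sizing reserved pagevec slots for only eight. Below is a minimal userspace sketch of the two calculations taken from the removed helpers; the PAGE_SHIFT, rsize, and buffer address values are illustrative assumptions, and PAGE_CACHE_SIZE is treated as equal to PAGE_SIZE, as it was at the time.

/*
 * Userspace sketch of the off-by-one this patch fixes.  Not kernel
 * code: PAGE_SHIFT and the rsize/address values are assumptions
 * chosen for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Old sizing helper: pages needed for a page-ALIGNED buffer. */
static unsigned int nfs_max_pages(unsigned int size)
{
	return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Old counting helper: pages actually spanned by the user buffer. */
static int nfs_direct_count_pages(unsigned long user_addr, size_t size)
{
	return ((user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT)
		- (user_addr >> PAGE_SHIFT);
}

int main(void)
{
	size_t rsize = 32768;			/* 8 pages of data */
	unsigned long user_addr = 0x10000200;	/* NOT page aligned */

	/* The pagevec was sized for 8 pages ... */
	printf("pagevec slots allocated: %u\n", nfs_max_pages(rsize));
	/* ... but get_user_pages() fills in 9 page pointers. */
	printf("pages actually spanned:  %d\n",
	       nfs_direct_count_pages(user_addr, rsize));
	return 0;
}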
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c |  50
1 file changed, 14 insertions(+), 36 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fecd3b095deb..76ca1cbc38f9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -100,25 +100,6 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 	return atomic_dec_and_test(&dreq->io_count);
 }
 
-/*
- * "size" is never larger than rsize or wsize.
- */
-static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
-{
-	int page_count;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-	BUG_ON(page_count < 0);
-
-	return page_count;
-}
-
-static inline unsigned int nfs_max_pages(unsigned int size)
-{
-	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
-
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
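With those helpers removed, the callers in the hunks below pass pgbase + bytes straight to the allocator, which can then size the pagevec and initialise data->npages itself. Rounding pgbase + bytes up to whole pages yields exactly what nfs_direct_count_pages(user_addr, bytes) returned, because pgbase is user_addr's offset within its page. A userspace sketch checking that equivalence, under the same assumed 4 KiB page size:

/*
 * Sketch: the allocator's new sizing, pages(pgbase + bytes), matches
 * the removed nfs_direct_count_pages(user_addr, bytes) for every
 * alignment.  Userspace demonstration only.
 */
#include <assert.h>
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long user_addr;
	size_t bytes;

	for (user_addr = 0x10000000;
	     user_addr < 0x10000000 + PAGE_SIZE; user_addr += 64)
		for (bytes = 1; bytes <= 8 * PAGE_SIZE; bytes += 512) {
			unsigned long pgbase = user_addr & ~PAGE_MASK;
			/* what the reworked allocator computes */
			unsigned int npages =
				(pgbase + bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
			/* what the removed helper computed */
			unsigned int old =
				((user_addr + bytes + PAGE_SIZE - 1) >> PAGE_SHIFT)
				- (user_addr >> PAGE_SHIFT);
			assert(npages == old);
		}
	printf("sizings agree for all tested alignments\n");
	return 0;
}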
@@ -276,28 +257,24 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int rpages = nfs_max_pages(rsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(rsize,count);
+
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(rpages);
+		data = nfs_readdata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = rsize;
-		if (count < rsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 1, 0, data->pagevec, NULL);
@@ -344,8 +321,10 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+		/* FIXME: Remove this unnecessary math from final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
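The FIXME and BUG_ON added in the hunk above record that the incremental pgbase arithmetic is now redundant: pgbase is recomputed from user_addr at the top of each loop iteration, so stepping it by bytes and masking must reproduce user_addr & ~PAGE_MASK. A userspace sketch of that invariant, with the page size and transfer sizes assumed for illustration:

/*
 * Sketch of the invariant asserted by the new BUG_ON: advancing
 * pgbase in lockstep with user_addr and masking always equals
 * user_addr & ~PAGE_MASK.  Userspace demonstration only.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long user_addr = 0x10000200;	/* arbitrary start */
	unsigned long pgbase = user_addr & ~PAGE_MASK;
	unsigned long count = 100000;		/* arbitrary total */

	while (count != 0) {
		unsigned long bytes = count < 32768 ? count : 32768;

		user_addr += bytes;
		/* the "unnecessary math" kept in this patch ... */
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		/* ... must agree with recomputing from user_addr */
		assert(pgbase == (user_addr & ~PAGE_MASK));
		count -= bytes;
	}
	printf("invariant holds\n");
	return 0;
}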
@@ -524,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-	dreq->commit_data = nfs_commit_alloc(0);
+	dreq->commit_data = nfs_commit_alloc();
 	if (dreq->commit_data != NULL)
 		dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -605,28 +584,24 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int wpages = nfs_max_pages(wsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(wsize,count);
+
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(wpages);
+		data = nfs_writedata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = wsize;
-		if (count < wsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 0, 0, data->pagevec, NULL);
@@ -676,8 +651,11 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+
+		/* FIXME: Remove this useless math from the final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);