Diffstat (limited to 'fs/nfs/file.c')
-rw-r--r--  fs/nfs/file.c  80
1 files changed, 60 insertions, 20 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 08c7c7387fce..d29f90d00aa2 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -306,27 +306,50 @@ nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 }
 
 /*
- * This does the "real" work of the write. The generic routine has
- * allocated the page, locked it, done all the page alignment stuff
- * calculations etc. Now we should just copy the data from user
- * space and write it back to the real medium..
+ * This does the "real" work of the write. We must allocate and lock the
+ * page to be sent back to the generic routine, which then copies the
+ * data from user space.
  *
  * If the writer ends up delaying the write, the writer needs to
  * increment the page use counts until he is done with the page.
  */
-static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata)
 {
-	return nfs_flush_incompatible(file, page);
+	int ret;
+	pgoff_t index;
+	struct page *page;
+	index = pos >> PAGE_CACHE_SHIFT;
+
+	page = __grab_cache_page(mapping, index);
+	if (!page)
+		return -ENOMEM;
+	*pagep = page;
+
+	ret = nfs_flush_incompatible(file, page);
+	if (ret) {
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	return ret;
 }
 
-static int nfs_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
+static int nfs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
 {
-	long status;
+	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+	int status;
 
 	lock_kernel();
-	status = nfs_updatepage(file, page, offset, to-offset);
+	status = nfs_updatepage(file, page, offset, copied);
 	unlock_kernel();
-	return status;
+
+	unlock_page(page);
+	page_cache_release(page);
+
+	return status < 0 ? status : copied;
 }
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
@@ -354,8 +377,8 @@ const struct address_space_operations nfs_file_aops = {
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.writepage = nfs_writepage,
 	.writepages = nfs_writepages,
-	.prepare_write = nfs_prepare_write,
-	.commit_write = nfs_commit_write,
+	.write_begin = nfs_write_begin,
+	.write_end = nfs_write_end,
 	.invalidatepage = nfs_invalidate_page,
 	.releasepage = nfs_release_page,
 #ifdef CONFIG_NFS_DIRECTIO
@@ -369,18 +392,35 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
 	struct file *filp = vma->vm_file;
 	unsigned pagelen;
 	int ret = -EINVAL;
+	void *fsdata;
+	struct address_space *mapping;
+	loff_t offset;
 
 	lock_page(page);
-	if (page->mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping)
-		goto out_unlock;
+	mapping = page->mapping;
+	if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
+		unlock_page(page);
+		return -EINVAL;
+	}
 	pagelen = nfs_page_length(page);
-	if (pagelen == 0)
-		goto out_unlock;
-	ret = nfs_prepare_write(filp, page, 0, pagelen);
-	if (!ret)
-		ret = nfs_commit_write(filp, page, 0, pagelen);
-out_unlock:
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	unlock_page(page);
+
+	/*
+	 * we can use mapping after releasing the page lock, because:
+	 * we hold mmap_sem on the fault path, which should pin the vma
+	 * which should pin the file, which pins the dentry which should
+	 * hold a reference on inode.
+	 */
+
+	if (pagelen) {
+		struct page *page2 = NULL;
+		ret = nfs_write_begin(filp, mapping, offset, pagelen,
+				0, &page2, &fsdata);
+		if (!ret)
+			ret = nfs_write_end(filp, mapping, offset, pagelen,
+					pagelen, page2, fsdata);
+	}
 	return ret;
 }
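
For context, the sketch below shows how a caller is expected to drive the ->write_begin/->write_end pair that this patch converts NFS to: write_begin finds or allocates the pagecache page, locks it and hands it back through *pagep, the caller copies the data into it, and write_end unlocks and releases the page and reports how many bytes were accepted. sketch_write_one_chunk() is a hypothetical helper, not part of this patch or of mainline; it is loosely modeled on the generic write path of that era and leaves out iovec iteration, fault handling and dirty-page throttling.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/*
 * Hypothetical helper (illustration only): write one chunk that fits
 * inside a single page, i.e. offset + len <= PAGE_CACHE_SIZE.
 */
static int sketch_write_one_chunk(struct file *file, loff_t pos,
				  const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;
	void *fsdata;
	void *kaddr;
	int status;

	/* ->write_begin allocates and locks the page, returned via *pagep */
	status = a_ops->write_begin(file, mapping, pos, len, 0, &page, &fsdata);
	if (status)
		return status;

	/* the caller, not the filesystem, copies data into the locked page */
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr + offset, buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	/*
	 * ->write_end is told how many bytes were actually copied; it must
	 * unlock and release the page and return the count or an error,
	 * exactly as nfs_write_end() above does.
	 */
	status = a_ops->write_end(file, mapping, pos, len, len, page, fsdata);
	return status < 0 ? status : 0;
}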