author     Jan Kara <jack@suse.cz>  2023-01-19 10:51:21 +0100
committer  Jan Kara <jack@suse.cz>  2023-01-26 16:46:35 +0100
commit     3c21204818ae45504b5d8ce8902748ef2306f0f3 (patch)
tree       5768468b7032d20266917b5fb0f61ee614f6ce5c /fs/udf/file.c
parent     b9a861fd527ab123e76effb492b4eb7e8115d4ca (diff)
udf: Allocate blocks on write page fault
Currently, if a file with holes is mapped, udf allocates blocks for the dirtied pages during page writeback. This however creates problems with truncating the final extent to its proper size, and currently we leave the last extent untruncated, which violates the UDF standard. So allocate blocks on the write page fault instead. In that case the last extent gets truncated when the file is closed and everything is happy.

Signed-off-by: Jan Kara <jack@suse.cz>
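To make the scenario concrete, here is a minimal user-space sketch (an editorial illustration, not part of the patch; the mount point and sizes are hypothetical). The store into the hole raises the write page fault that, with this change, is handled by udf_page_mkwrite() and allocates the block immediately; watching st_blocks shows when allocation happens.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void report(int fd, const char *when)
{
	struct stat st;

	if (fstat(fd, &st) == 0)
		printf("%s: st_blocks=%lld\n", when, (long long)st.st_blocks);
}

int main(void)
{
	/* Hypothetical file on a mounted UDF filesystem. */
	int fd = open("/mnt/udf/sparse", O_RDWR | O_CREAT | O_TRUNC, 0644);
	char *p;

	if (fd < 0)
		return 1;
	/* Make the file one big hole. */
	if (ftruncate(fd, 1 << 20) < 0)
		return 1;
	p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	report(fd, "before store");
	p[0] = 1;	/* write page fault into the hole */
	report(fd, "after store");	/* grows here with this patch */
	msync(p, 1 << 20, MS_SYNC);
	report(fd, "after msync");	/* grew only around writeback before */
	munmap(p, 1 << 20);
	close(fd);
	return 0;
}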
Diffstat (limited to 'fs/udf/file.c')
-rw-r--r--  fs/udf/file.c  61
1 file changed, 60 insertions(+), 1 deletion(-)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index cf050bdffd9e..322115c8369d 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -134,6 +134,57 @@ const struct address_space_operations udf_adinicb_aops = {
.direct_IO = udf_adinicb_direct_IO,
};
+static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct inode *inode = file_inode(vma->vm_file);
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page = vmf->page;
+ loff_t size;
+ unsigned int end;
+ vm_fault_t ret = VM_FAULT_LOCKED;
+ int err;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
+ filemap_invalidate_lock_shared(mapping);
+ lock_page(page);
+ size = i_size_read(inode);
+ if (page->mapping != inode->i_mapping || page_offset(page) >= size) {
+ unlock_page(page);
+ ret = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
+ /* Space is already allocated for in-ICB file */
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
+ goto out_dirty;
+ if (page->index == size >> PAGE_SHIFT)
+ end = size & ~PAGE_MASK;
+ else
+ end = PAGE_SIZE;
+ err = __block_write_begin(page, 0, end, udf_get_block);
+ if (!err)
+ err = block_commit_write(page, 0, end);
+ if (err < 0) {
+ unlock_page(page);
+ ret = block_page_mkwrite_return(err);
+ goto out_unlock;
+ }
+out_dirty:
+ set_page_dirty(page);
+ wait_for_stable_page(page);
+out_unlock:
+ filemap_invalidate_unlock_shared(mapping);
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+
+static const struct vm_operations_struct udf_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = udf_page_mkwrite,
+};
+
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
ssize_t retval;
@@ -238,11 +289,19 @@ static int udf_release_file(struct inode *inode, struct file *filp)
return 0;
}
+static int udf_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+ vma->vm_ops = &udf_file_vm_ops;
+
+ return 0;
+}
+
const struct file_operations udf_file_operations = {
.read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.open = generic_file_open,
- .mmap = generic_file_mmap,
+ .mmap = udf_file_mmap,
.write_iter = udf_file_write_iter,
.release = udf_release_file,
.fsync = generic_file_fsync,
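As an aside (editorial note, not part of the patch): the core of udf_page_mkwrite() mirrors the generic buffer-head helper block_page_mkwrite(), which performs the same lock, size-check, __block_write_begin() and block_commit_write() sequence and dirties the page. A filesystem without UDF's in-ICB files could write roughly the following, where my_get_block is a hypothetical stand-in for its get_block callback:

static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	/* Locks the page, verifies it is still mapped and within i_size,
	 * maps buffers via my_get_block(), commits and dirties the page;
	 * returns with the page still locked on success. */
	err = block_page_mkwrite(vmf->vma, vmf, my_get_block);
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

udf_page_mkwrite() open-codes that sequence so it can skip block allocation entirely for ICBTAG_FLAG_AD_IN_ICB files, whose data lives inside the ICB itself, and so the block mapping happens under filemap_invalidate_lock_shared() to synchronize against truncation.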