path: root/fs/dax.c
author	Jan Kara <jack@suse.cz>	2016-05-12 18:29:20 +0200
committer	Ross Zwisler <ross.zwisler@linux.intel.com>	2016-05-19 15:28:40 -0600
commit	4d9a2c8746671efbb0c27d3ae28c7474597a7aad (patch)
tree	34fb766e63a1e503ba48482b34325dbb7bd3aafe /fs/dax.c
parent	bc2466e4257369d0ebee2b6265070d323343fa72 (diff)
dax: Remove i_mmap_lock protection
Currently, faults are protected against truncate by the filesystem-specific i_mmap_sem and, in the case of a hole page, by the page lock. CoW faults are protected by DAX radix tree entry locking. So there is no need for i_mmap_lock in the DAX code. Remove it.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
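For context, the truncate protection the message refers to is the per-index radix tree entry lock introduced earlier in this series. Below is a rough, illustrative sketch of that pattern in a fault path; grab_mapping_entry() and put_locked_mapping_entry() are the fs/dax.c helpers from this series, but the wrapper function, its signature, and the exact helper arguments shown here are assumptions, not code from this commit.

/*
 * Illustrative sketch only (not from this commit): a DAX fault path holds
 * the radix tree entry lock across the mapping work instead of taking
 * i_mmap_lock. Helper names follow fs/dax.c in this series; the wrapper
 * function and its signature are hypothetical.
 */
static int dax_fault_locking_sketch(struct address_space *mapping,
				    struct vm_fault *vmf)
{
	void *entry;
	int error;

	/* Find or create the radix tree entry for this index and lock it. */
	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/*
	 * Truncate must lock each entry before removing it, so while we hold
	 * the entry lock the block mapping installed here cannot go away.
	 * This is the protection that makes i_mmap_lock_read() unnecessary.
	 */
	error = 0;	/* ... insert the PTE / update the entry here ... */

	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
	return error;
}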
Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	24
1 file changed, 5 insertions, 19 deletions
diff --git a/fs/dax.c b/fs/dax.c
index be74635e05a6..6dbe6021cab7 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping,
 		.sector = to_sector(bh, mapping->host),
 		.size = bh->b_size,
 	};
-	int error;
 	void *ret;
 	void *entry = *entryp;
-	i_mmap_lock_read(mapping);
-
-	if (dax_map_atomic(bdev, &dax) < 0) {
-		error = PTR_ERR(dax.addr);
-		goto out;
-	}
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
-	if (IS_ERR(ret)) {
-		error = PTR_ERR(ret);
-		goto out;
-	}
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
 	*entryp = ret;
-	error = vm_insert_mixed(vma, vaddr, dax.pfn);
- out:
-	i_mmap_unlock_read(mapping);
-	return error;
+	return vm_insert_mixed(vma, vaddr, dax.pfn);
 }
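Read together, the hunk above drops the lock/unlock pair along with the error variable and the out: label plumbing, so every failure now returns directly. A reconstruction of dax_insert_mapping() as it looks after this patch is sketched below; the parameter list and the vaddr/bdev/dax locals are not visible in the hunk and are assumed from the surrounding fs/dax.c code of this kernel version.

/*
 * Sketch of dax_insert_mapping() after this patch, reconstructed from the
 * hunk above. Parameters and the vaddr/bdev locals are assumptions taken
 * from the surrounding fs/dax.c, not from the hunk itself.
 */
static int dax_insert_mapping(struct address_space *mapping, struct buffer_head *bh,
			      void **entryp, struct vm_area_struct *vma,
			      struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, mapping->host),
		.size = bh->b_size,
	};
	void *ret;
	void *entry = *entryp;

	/* Translate the block mapping to a pfn; errors now return directly. */
	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	/* Record the mapping in the radix tree while the entry is locked. */
	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	/* No i_mmap_lock needed: the locked radix tree entry blocks truncate. */
	return vm_insert_mixed(vma, vaddr, dax.pfn);
}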
/**
@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		truncate_pagecache_range(inode, lstart, lend);
 	}
-	i_mmap_lock_read(mapping);
-
 	if (!write && !buffer_mapped(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	}
  out:
-	i_mmap_unlock_read(mapping);
-
 	return result;
  fallback: