author     Jan Kara <jack@suse.cz>                  2017-11-01 16:36:33 +0100
committer  Dan Williams <dan.j.williams@intel.com>  2017-11-03 06:26:23 -0700
commit     5e161e4066d3ebeaff95a4b979b42f8bf00494d5
tree       a2a0e0118ca82153089ea9ce6f6c0827ea51bdeb
parent     31a6f1a6e5a4a26040b67d8fa4256539b36f5893
dax: Factor out getting of pfn out of iomap
Factor out the code that gets a pfn out of an iomap, which is shared
between the PTE and PMD fault paths.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'fs/dax.c')
-rw-r--r-- | fs/dax.c | 83
1 file changed, 43 insertions(+), 40 deletions(-)
@@ -825,30 +825,53 @@ static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
 }
 
-static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
-		loff_t pos, void *entry)
+static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
+		pfn_t *pfnp)
 {
 	const sector_t sector = dax_iomap_sector(iomap, pos);
-	struct vm_area_struct *vma = vmf->vma;
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	unsigned long vaddr = vmf->address;
-	void *ret, *kaddr;
 	pgoff_t pgoff;
+	void *kaddr;
 	int id, rc;
-	pfn_t pfn;
+	long length;
 
-	rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
+	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
 	if (rc)
 		return rc;
-
 	id = dax_read_lock();
-	rc = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(PAGE_SIZE),
-			&kaddr, &pfn);
-	if (rc < 0) {
-		dax_read_unlock(id);
-		return rc;
+	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
+			&kaddr, pfnp);
+	if (length < 0) {
+		rc = length;
+		goto out;
 	}
+	rc = -EINVAL;
+	if (PFN_PHYS(length) < size)
+		goto out;
+	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
+		goto out;
+	/* For larger pages we need devmap */
+	if (length > 1 && !pfn_t_devmap(*pfnp))
+		goto out;
+	rc = 0;
+out:
 	dax_read_unlock(id);
+	return rc;
+}
+
+static int dax_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
+		loff_t pos, void *entry)
+{
+	const sector_t sector = dax_iomap_sector(iomap, pos);
+	struct vm_area_struct *vma = vmf->vma;
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	unsigned long vaddr = vmf->address;
+	void *ret;
+	int rc;
+	pfn_t pfn;
+
+	rc = dax_iomap_pfn(iomap, pos, PAGE_SIZE, &pfn);
+	if (rc < 0)
+		return rc;
 
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
 	if (IS_ERR(ret))
@@ -1223,46 +1246,26 @@ static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	const sector_t sector = dax_iomap_sector(iomap, pos);
-	struct dax_device *dax_dev = iomap->dax_dev;
-	struct block_device *bdev = iomap->bdev;
 	struct inode *inode = mapping->host;
-	const size_t size = PMD_SIZE;
-	void *ret = NULL, *kaddr;
-	long length = 0;
-	pgoff_t pgoff;
+	void *ret = NULL;
 	pfn_t pfn = {};
-	int id;
+	int rc;
 
-	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
+	rc = dax_iomap_pfn(iomap, pos, PMD_SIZE, &pfn);
+	if (rc < 0)
 		goto fallback;
 
-	id = dax_read_lock();
-	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
-	if (length < 0)
-		goto unlock_fallback;
-	length = PFN_PHYS(length);
-
-	if (length < size)
-		goto unlock_fallback;
-	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
-		goto unlock_fallback;
-	if (!pfn_t_devmap(pfn))
-		goto unlock_fallback;
-	dax_read_unlock(id);
-
 	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
 			RADIX_DAX_PMD);
 	if (IS_ERR(ret))
 		goto fallback;
 
-	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
+	trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, ret);
 	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
 			vmf->flags & FAULT_FLAG_WRITE);
 
-unlock_fallback:
-	dax_read_unlock(id);
 fallback:
-	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
+	trace_dax_pmd_insert_mapping_fallback(inode, vmf, PMD_SIZE, pfn, ret);
	return VM_FAULT_FALLBACK;
 }
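A note for readers tracing the new checks: the test
pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1) generalizes the PG_PMD_COLOUR check
that the PMD path used before. For PMD_SIZE mappings (2 MiB with 4 KiB pages)
the mask is 511, while for PAGE_SIZE it is 0, so the PTE caller passes the
test trivially; that is what lets a single helper serve both fault paths.
Below is a minimal userspace sketch of that arithmetic, not kernel code: the
constants, the pfn_aligned helper, and the sample pfn values are illustrative
assumptions, chosen only to show the mask behaviour.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(2UL << 20)	/* assumption: 2 MiB PMD entries */
#define PHYS_PFN(x)	((x) >> PAGE_SHIFT)

/*
 * Mirrors the alignment test in dax_iomap_pfn(): a mapping of `size`
 * bytes may only be inserted if the pfn is aligned to size/PAGE_SIZE
 * pages. For PMD_SIZE the mask is 511 -- the old PG_PMD_COLOUR value.
 */
static bool pfn_aligned(unsigned long pfn, unsigned long size)
{
	return (pfn & (PHYS_PFN(size) - 1)) == 0;
}

int main(void)
{
	/* pfn 0x200 (512) sits on a 2 MiB boundary: PMD insert is possible. */
	printf("pfn 0x200, PMD: %d\n", pfn_aligned(0x200, PMD_SIZE));
	/* pfn 0x201 is only page-aligned: the PMD path must fall back... */
	printf("pfn 0x201, PMD: %d\n", pfn_aligned(0x201, PMD_SIZE));
	/* ...but the PTE path succeeds, since PHYS_PFN(PAGE_SIZE)-1 == 0. */
	printf("pfn 0x201, PTE: %d\n", pfn_aligned(0x201, PAGE_SIZE));
	return 0;
}

The "length > 1 && !pfn_t_devmap(*pfnp)" test in the helper follows the same
pattern for the devmap requirement: only mappings larger than one page need a
devmap-backed pfn, so the PTE caller again passes through unchanged.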