author     Dave Jiang <dave.jiang@intel.com>                2017-02-24 14:56:41 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-02-24 17:46:54 -0800
commit     11bac80004499ea59f361ef2a5516c84b6eab675
tree       b971df98b3fa9d4e62b8f4f7b5ec950181df4daa   /drivers
parent     374ad05ab64d696303cec5cc8ec3a65d457b7b1c
mm, fs: reduce fault, page_mkwrite, and pfn_mkwrite to take only vmf
->fault(), ->page_mkwrite(), and ->pfn_mkwrite() handlers do not need to
take both a vma and a vmf parameter when the vma is already available as
vmf->vma. Remove the vma parameter to simplify things.
[arnd@arndb.de: fix ARM build]
Link: http://lkml.kernel.org/r/20170125223558.1451224-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/148521301778.19116.10840599906674778980.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Kara <jack@suse.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
45 files changed, 120 insertions, 108 deletions
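
Every hunk below applies the same mechanical conversion: the vma argument is dropped and the handler recovers the VMA from vmf->vma. A minimal sketch of the resulting shape, using a made-up foo_fault()/struct foo_device rather than any driver touched by this patch:

#include <linux/mm.h>

struct foo_device {				/* hypothetical per-device state */
	unsigned long base_pfn;
};

/*
 * Old prototype: int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 * New prototype: the VMA is read from vmf->vma instead of a second argument.
 */
static int foo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct foo_device *fdev = vma->vm_private_data;	/* set up at mmap time */
	unsigned long pfn = fdev->base_pfn + vmf->pgoff;	/* hypothetical PFN math */
	int ret;

	ret = vm_insert_pfn(vma, vmf->address, pfn);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	/* -EBUSY only means another thread installed the PTE first. */
	return VM_FAULT_NOPAGE;
}
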
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 15b263a420e8..2bbcdc6fdfee 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3342,7 +3342,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
-static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int binder_vm_fault(struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 737187865269..53fe633df1e8 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,15 +11,14 @@
 
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
-					struct vm_fault *vmf)
+static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	dma_addr_t dma_addr;
 	unsigned long pa;
 	struct page *page;
 
-	dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
+	dma_addr = vmf->address - vmf->vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
 
 	if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index a697ca0cab1e..a9c2fa3c81e5 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -191,12 +191,12 @@ mspec_close(struct vm_area_struct *vma)
  * Creates a mspec page and maps it to user space.
  */
 static int
-mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+mspec_fault(struct vm_fault *vmf)
 {
 	unsigned long paddr, maddr;
 	unsigned long pfn;
 	pgoff_t index = vmf->pgoff;
-	struct vma_data *vdata = vma->vm_private_data;
+	struct vma_data *vdata = vmf->vma->vm_private_data;
 
 	maddr = (volatile unsigned long) vdata->maddr[index];
 	if (maddr == 0) {
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * be because another thread has installed the pte first, so it
 	 * is no problem.
 	 */
-	vm_insert_pfn(vma, vmf->address, pfn);
+	vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 	return VM_FAULT_NOPAGE;
 }
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 18e9875f6277..0261f332bf3e 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -419,8 +419,7 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
 	return -1;
 }
 
-static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
-		struct vm_fault *vmf)
+static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 {
 	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
@@ -428,7 +427,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	phys_addr_t phys;
 	pfn_t pfn;
 
-	if (check_vma(dax_dev, vma, __func__))
+	if (check_vma(dax_dev, vmf->vma, __func__))
 		return VM_FAULT_SIGBUS;
 
 	dax_region = dax_dev->region;
@@ -446,7 +445,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vma, vmf->address, pfn);
+	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
 
 	if (rc == -ENOMEM)
 		return VM_FAULT_OOM;
@@ -456,8 +455,9 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
-static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int dax_dev_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	int rc;
 	struct file *filp = vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
@@ -466,7 +466,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
 			? "write" : "read", vma->vm_start, vma->vm_end);
 	rcu_read_lock();
-	rc = __dax_dev_fault(dax_dev, vma, vmf);
+	rc = __dax_dev_fault(dax_dev, vmf);
 	rcu_read_unlock();
 
 	return rc;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 560d416deab2..1597458d884e 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -14,14 +14,15 @@
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int armada_gem_vm_fault(struct vm_fault *vmf)
 {
-	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
+	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	int ret;
 
-	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-	ret = vm_insert_pfn(vma, vmf->address, pfn);
+	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 
 	switch (ret) {
 	case 0:
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index bd311c77c254..bae6e26038ee 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -96,8 +96,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if IS_ENABLED(CONFIG_AGP)
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_local_map *map = NULL;
@@ -168,7 +169,7 @@ vm_fault_error:
 	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 #else
-static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_fault(struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
 }
@@ -184,8 +185,9 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_shm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
@@ -280,14 +282,14 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 /**
  * \c fault method for DMA virtual memory.
  *
- * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_dma_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_device_dma *dma = dev->dma;
@@ -315,14 +317,14 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 /**
  * \c fault method for scatter-gather virtual memory.
  *
- * \param vma virtual memory area.
  * \param address access address.
  * \return pointer to the page structure.
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_do_vm_sg_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_local_map *map = vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -347,24 +349,24 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return 0;
 }
 
-static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_fault(vma, vmf);
+	return drm_do_vm_fault(vmf);
 }
 
-static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_shm_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_shm_fault(vma, vmf);
+	return drm_do_vm_shm_fault(vmf);
 }
 
-static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_dma_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_dma_fault(vma, vmf);
+	return drm_do_vm_dma_fault(vmf);
 }
 
-static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int drm_vm_sg_fault(struct vm_fault *vmf)
 {
-	return drm_do_vm_sg_fault(vma, vmf);
+	return drm_do_vm_sg_fault(vmf);
 }
 
 /** AGP virtual memory operations */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index c255eda40526..e41f38667c1c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -73,7 +73,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index aa6e35ddc87f..e78f1406885d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -175,8 +175,9 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return obj->ops->mmap(obj, vma);
 }
 
-int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int etnaviv_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct page **pages, *page;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 57b81460fec8..4c28f7ffcc4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -447,8 +447,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 	return ret;
 }
 
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int exynos_drm_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
 	unsigned long pfn;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index df7c543d6558..85457255fcd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -116,7 +116,7 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
 				   uint64_t *offset);
 
 /* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int exynos_drm_gem_fault(struct vm_fault *vmf);
 
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index da42d2e1d397..ffe6b4ffa1a8 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -111,8 +111,9 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 	return 0;
 }
 
-static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int psbfb_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct psb_framebuffer *psbfb = vma->vm_private_data;
 	struct drm_device *dev = psbfb->base.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 527c62917660..7da061aab729 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -164,8 +164,9 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
  *	vma->vm_private_data points to the GEM object that is backing this
  *	mapping.
  */
-int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int psb_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj;
 	struct gtt_range *r;
 	int ret;
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 05d7aaf47eea..83e22fd4cfc0 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -752,7 +752,7 @@ extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 			       struct drm_mode_create_dumb *args);
 extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
 				uint32_t handle, uint64_t *offset);
-extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_fault(struct vm_fault *vmf);
 
 /* psb_device.c */
 extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bcc81912b5e5..0a4b42d31391 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3352,7 +3352,7 @@ int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 					 unsigned int flags);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 unsigned int flags,
 			 long timeout,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 88f3628b4e29..6908123162d1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1772,7 +1772,6 @@ compute_partial_view(struct drm_i915_gem_object *obj,
 
 /**
  * i915_gem_fault - fault a page into the GTT
- * @area: CPU VMA in question
  * @vmf: fault info
  *
  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
@@ -1789,9 +1788,10 @@ compute_partial_view(struct drm_i915_gem_object *obj,
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
-int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+int i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+	struct vm_area_struct *area = vmf->vma;
 	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index cdd7b2f8e977..c3b14876edaa 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -206,7 +206,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 		uint64_t *iova);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e140b05af134..59811f29607d 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -191,8 +191,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 }
 
-int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int msm_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 36d93ce84a29..65977982f15f 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -188,7 +188,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int omap_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
 int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 74a9968df421..5d5a9f517c30 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -518,7 +518,6 @@ static int fault_2d(struct drm_gem_object *obj,
 
 /**
  * omap_gem_fault - pagefault handler for GEM objects
- * @vma: the VMA of the GEM object
  * @vmf: fault detail
  *
  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
@@ -529,8 +528,9 @@ static int fault_2d(struct drm_gem_object *obj,
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
-int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int omap_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
 	struct drm_device *dev = obj->dev;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 4e1a40389964..7d1cab57c89e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -105,15 +105,15 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
 static struct vm_operations_struct qxl_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qxl_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7a10b3852970..684f1703aa5c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -979,19 +979,19 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int radeon_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct radeon_device *rdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL) {
 		return VM_FAULT_NOPAGE;
 	}
 	rdev = radeon_get_rdev(bo->bdev);
 	down_read(&rdev->pm.mclk_lock);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	up_read(&rdev->pm.mclk_lock);
 	return r;
 }
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index b523a5d4a38c..17e62ecb5d4d 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -441,8 +441,9 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 	return 0;
 }
 
-static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tegra_bo_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_gem_object *gem = vma->vm_private_data;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 	struct page *page;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 88169141bef5..35ffb3754feb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -43,7 +43,6 @@
 #define TTM_BO_VM_NUM_PREFAULT 16
 
 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
-				struct vm_area_struct *vma,
 				struct vm_fault *vmf)
 {
 	int ret = 0;
@@ -67,7 +66,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 			goto out_unlock;
 
 		ttm_bo_reference(bo);
-		up_read(&vma->vm_mm->mmap_sem);
+		up_read(&vmf->vma->vm_mm->mmap_sem);
 		(void) dma_fence_wait(bo->moving, true);
 		ttm_bo_unreserve(bo);
 		ttm_bo_unref(&bo);
@@ -92,8 +91,9 @@ out_unlock:
 	return ret;
 }
 
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ttm_bo_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -124,7 +124,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 			ttm_bo_reference(bo);
-			up_read(&vma->vm_mm->mmap_sem);
+			up_read(&vmf->vma->vm_mm->mmap_sem);
 			(void) ttm_bo_wait_unreserved(bo);
 			ttm_bo_unref(&bo);
 		}
@@ -168,7 +168,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * Wait for buffer data in transit, due to a pipelined
 		 * move.
 		 */
-		ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
+		ret = ttm_bo_vm_fault_idle(bo, vmf);
 		if (unlikely(ret != 0)) {
 			retval = ret;
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 6c4286e57362..2a75ab80527a 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -134,7 +134,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 3c0c4bd3f750..775c50e4f02c 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -100,8 +100,9 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ret;
 }
 
-int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int udl_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
 	struct page *page;
 	unsigned int page_offset;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 477e07f0ecb6..7ccbb03e98de 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -50,8 +50,9 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
 	kfree(vgem_obj);
 }
 
-static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vgem_gem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
 	unsigned long vaddr = vmf->address;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 9cc7079f7aca..70ec8ca8d9b1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -114,18 +114,17 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
 static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops;
 
-static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
-				struct vm_fault *vmf)
+static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
 {
 	struct ttm_buffer_object *bo;
 	struct virtio_gpu_device *vgdev;
 	int r;
 
-	bo = (struct ttm_buffer_object *)vma->vm_private_data;
+	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
 	if (bo == NULL)
 		return VM_FAULT_NOPAGE;
 	vgdev = virtio_gpu_get_vgdev(bo->bdev);
-	r = ttm_vm_ops->fault(vma, vmf);
+	r = ttm_vm_ops->fault(vmf);
 	return r;
 }
 #endif
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 3deef6cc7d7c..7175e6bedf21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1098,9 +1098,9 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
 	kfree(hi);
 }
 
-static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cs_char_vma_fault(struct vm_fault *vmf)
 {
-	struct cs_char *csdata = vma->vm_private_data;
+	struct cs_char *csdata = vmf->vma->vm_private_data;
 	struct page *page;
 
 	page = virt_to_page(csdata->mmap_base);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index e8d55a153a65..e88afe1a435c 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1188,9 +1188,9 @@ static void msc_mmap_close(struct vm_area_struct *vma)
 	mutex_unlock(&msc->buf_mutex);
 }
 
-static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int msc_mmap_fault(struct vm_fault *vmf)
 {
-	struct msc_iter *iter = vma->vm_file->private_data;
+	struct msc_iter *iter = vmf->vma->vm_file->private_data;
 	struct msc *msc = iter->msc;
 
 	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
@@ -1198,7 +1198,7 @@ static int msc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	get_page(vmf->page);
-	vmf->page->mapping = vma->vm_file->f_mapping;
+	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
 	vmf->page->index = vmf->pgoff;
 
 	return 0;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bd786b7bd30b..f46033984d07 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -92,7 +92,7 @@ static unsigned int poll_next(struct file *, struct poll_table_struct *);
 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
-static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static int vma_fault(struct vm_fault *);
 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 			    unsigned long arg);
 
@@ -695,7 +695,7 @@ done:
  * Local (non-chip) user memory is not mapped right away but as it is
  * accessed by the user-level code.
  */
-static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int vma_fault(struct vm_fault *vmf)
 {
 	struct page *page;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d1eacf1dfed..9396c1807cc3 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -893,7 +893,7 @@ bail:
 /*
  * qib_file_vma_fault - handle a VMA page fault.
  */
-static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int qib_file_vma_fault(struct vm_fault *vmf)
 {
 	struct page *page;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index ba63ca57ed7e..36bd904946bd 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -434,8 +434,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
  * now ...). Bounce buffers don't work very well for the data rates
  * video capture has.
  */
-static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int videobuf_vm_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 
 	dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 3907387b6d15..062bf6ca2625 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -121,8 +121,9 @@ void cxl_context_set_mapping(struct cxl_context *ctx,
 	mutex_unlock(&ctx->mapping_lock);
 }
 
-static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxl_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct cxl_context *ctx = vma->vm_file->private_data;
 	u64 area, offset;
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index af2e077da4b8..3641f1334cf0 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -926,8 +926,9 @@ again:
 *
 * Note: gru segments alway mmaped on GRU_GSEG_PAGESIZE boundaries.
 */
-int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int gru_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct gru_thread_state *gts;
 	unsigned long paddr, vaddr;
 	unsigned long expires;
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 5c3ce2459675..b5e308b50ed1 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -665,7 +665,7 @@ extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
 		int cbr_au_count, char *cbmap);
 extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
 		int dsr_au_count, char *dsmap);
-extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf);
+extern int gru_fault(struct vm_fault *vmf);
 extern struct gru_mm_struct *gru_register_mmu_notifier(void);
 extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms);
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 90869cee2b20..ef5bf55f08a4 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1053,7 +1053,6 @@ out:
 
 /**
  * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vma:	VM area associated with mapping.
  * @vmf:	VM fault associated with current fault.
  *
  * To support error notification via MMIO, faults are 'caught' by this routine
@@ -1067,8 +1066,9 @@ out:
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
-static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int cxlflash_mmap_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct file *file = vma->vm_file;
 	struct cxl_context *ctx = cxl_fops_get_context(file);
 	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
@@ -1097,7 +1097,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (likely(!ctxi->err_recovery_active)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
+		rc = ctxi->cxl_mmap_vmops->fault(vmf);
 	} else {
 		dev_dbg(dev, "%s: err recovery active, use err_page\n",
 			__func__);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e831e01f9fa6..29b86505f796 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1185,8 +1185,9 @@ sg_fasync(int fd, struct file *filp, int mode)
 }
 
 static int
-sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+sg_vma_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	Sg_fd *sfp;
 	unsigned long offset, len, sa;
 	Sg_scatter_hold *rsv_schp;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 969600779e44..2c3ffbcbd621 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -870,9 +870,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 	mutex_unlock(&buffer->lock);
 }
 
-static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_fault *vmf)
 {
-	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_buffer *buffer = vmf->vma->vm_private_data;
 	unsigned long pfn;
 	int ret;
 
@@ -881,7 +881,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
 	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-	ret = vm_insert_pfn(vma, vmf->address, pfn);
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 	mutex_unlock(&buffer->lock);
 	if (ret)
 		return VM_FAULT_ERROR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 9afa6bec3e6f..896196c74cd2 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -321,7 +321,7 @@ out:
 	return fault_ret;
 }
 
-static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_fault(struct vm_fault *vmf)
 {
 	int count = 0;
 	bool printed = false;
@@ -335,7 +335,7 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
 
 restart:
-	result = ll_fault0(vma, vmf);
+	result = ll_fault0(vmf->vma, vmf);
 	LASSERT(!(result & VM_FAULT_LOCKED));
 	if (result == 0) {
 		struct page *vmpage = vmf->page;
@@ -362,8 +362,9 @@ restart:
 	return result;
 }
 
-static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ll_page_mkwrite(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	int count = 0;
 	bool printed = false;
 	bool retry;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 3e9cf710501b..4c57755e06e7 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -1014,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 {
 	struct vm_fault *vmf = cfio->ft_vmf;
 
-	cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+	cfio->ft_flags = filemap_fault(vmf);
 	cfio->ft_flags_valid = 1;
 
 	if (vmf->page) {
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8041710b6972..5c1cb2df3a54 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -783,15 +783,15 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }
 
-static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int tcmu_vma_fault(struct vm_fault *vmf)
 {
-	struct tcmu_dev *udev = vma->vm_private_data;
+	struct tcmu_dev *udev = vmf->vma->vm_private_data;
 	struct uio_info *info = &udev->uio_info;
 	struct page *page;
 	unsigned long offset;
 	void *addr;
-	int mi = tcmu_find_mem_index(vma);
+	int mi = tcmu_find_mem_index(vmf->vma);
 	if (mi < 0)
 		return VM_FAULT_SIGBUS;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index fba021f5736a..31d95dc9c202 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -597,14 +597,14 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }
 
-static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int uio_vma_fault(struct vm_fault *vmf)
 {
-	struct uio_device *idev = vma->vm_private_data;
+	struct uio_device *idev = vmf->vma->vm_private_data;
 	struct page *page;
 	unsigned long offset;
 	void *addr;
-	int mi = uio_find_mem_index(vma);
+	int mi = uio_find_mem_index(vmf->vma);
 	if (mi < 0)
 		return VM_FAULT_SIGBUS;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 91c22276c03b..9fb8b1e6ecc2 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1223,9 +1223,9 @@ static void mon_bin_vma_close(struct vm_area_struct *vma)
 /*
  * Map ring pages to user space.
  */
-static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int mon_bin_vma_fault(struct vm_fault *vmf)
 {
-	struct mon_reader_bin *rp = vma->vm_private_data;
+	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
 	unsigned long offset, chunk_idx;
 	struct page *pageptr;
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 74b5bcac8bf2..37f69c061210 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -37,12 +37,11 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
 }
 
 /* this is to find and return the vmalloc-ed fb pages */
-static int fb_deferred_io_fault(struct vm_area_struct *vma,
-				struct vm_fault *vmf)
+static int fb_deferred_io_fault(struct vm_fault *vmf)
 {
 	unsigned long offset;
 	struct page *page;
-	struct fb_info *info = vma->vm_private_data;
+	struct fb_info *info = vmf->vma->vm_private_data;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
 	if (offset >= info->fix.smem_len)
@@ -54,8 +53,8 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
 
 	get_page(page);
 
-	if (vma->vm_file)
-		page->mapping = vma->vm_file->f_mapping;
+	if (vmf->vma->vm_file)
+		page->mapping = vmf->vma->vm_file->f_mapping;
 	else
 		printk(KERN_ERR "no mapping available\n");
 
@@ -91,11 +90,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
 /* vm_ops->page_mkwrite handler */
-static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
-				  struct vm_fault *vmf)
+static int fb_deferred_io_mkwrite(struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
-	struct fb_info *info = vma->vm_private_data;
+	struct fb_info *info = vmf->vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct page *cur;
 
@@ -105,7 +103,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 	   deferred framebuffer IO. then if userspace touches a page again,
 	   we repeat the same scheme */
 
-	file_update_time(vma->vm_file);
+	file_update_time(vmf->vma->vm_file);
 
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2077a3ac7c0c..7a92a5e1d40c 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -804,10 +804,10 @@ static void privcmd_close(struct vm_area_struct *vma)
 	kfree(pages);
 }
 
-static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int privcmd_fault(struct vm_fault *vmf)
 {
 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
-	       vma, vma->vm_start, vma->vm_end,
+	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
 	       vmf->pgoff, (void *)vmf->address);
 
 	return VM_FAULT_SIGBUS;
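
The vm_operations_struct wiring is untouched by this conversion; only the handler prototypes change. Continuing the hypothetical foo_fault() sketch above:

static const struct vm_operations_struct foo_vm_ops = {
	/* ->page_mkwrite and ->pfn_mkwrite callbacks take the same vmf-only prototype. */
	.fault = foo_fault,
};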