author    Jérôme Glisse <jglisse@redhat.com>  2019-05-13 17:20:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 09:47:48 -0700
commit    63d5066f6e5a1713d0247ef38f0add545408896b (patch)
tree      c84b722a21279d762e4fea9943e272a44464c25f /include/linux/hmm.h
parent    023a019a9b4e90b9df8ed5be591787b5c914d74f (diff)
mm/hmm: mirror hugetlbfs (snapshotting, faulting and DMA mapping)

HMM mirror is a device driver helper to mirror ranges of virtual addresses. It means that jobs running on the device can access the same virtual addresses as the CPU threads of the owning process. This patch adds support for hugetlbfs mappings (i.e. ranges of virtual addresses that are mmaps of a hugetlbfs file).

[rcampbell@nvidia.com: fix initial PFN for hugetlbfs pages]
Link: http://lkml.kernel.org/r/20190419233536.8080-1-rcampbell@nvidia.com
Link: http://lkml.kernel.org/r/20190403193318.16478-9-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
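The API change visible to drivers is the extra page_shift argument to hmm_range_register(), shown in the diff below. A minimal, hypothetical sketch of how a caller could pick the shift per VMA (is_vm_hugetlb_page(), hstate_vma() and huge_page_shift() are the standard hugetlb accessors; my_driver_register_range() and its callers are made up for illustration):

#include <linux/hmm.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>

/* Hypothetical helper: register an HMM range over [start, end) of @vma,
 * using the huge page shift when the VMA is backed by hugetlbfs and the
 * regular PAGE_SHIFT otherwise.
 */
static int my_driver_register_range(struct hmm_range *range,
				    struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	unsigned page_shift = PAGE_SHIFT;

	if (is_vm_hugetlb_page(vma))
		page_shift = huge_page_shift(hstate_vma(vma));

	return hmm_range_register(range, vma->vm_mm, start, end, page_shift);
}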
Diffstat (limited to 'include/linux/hmm.h')
-rw-r--r--  include/linux/hmm.h | 27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dee2f8953b2e..e5834082de60 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -181,11 +181,32 @@ struct hmm_range {
const uint64_t *values;
uint64_t default_flags;
uint64_t pfn_flags_mask;
+ uint8_t page_shift;
uint8_t pfn_shift;
bool valid;
};
/*
+ * hmm_range_page_shift() - return the page shift for the range
+ * @range: range being queried
+ * Returns: page shift (page size = 1 << page shift) for the range
+ */
+static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
+{
+ return range->page_shift;
+}
+
+/*
+ * hmm_range_page_size() - return the page size for the range
+ * @range: range being queried
+ * Returns: page size for the range in bytes
+ */
+static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
+{
+ return 1UL << hmm_range_page_shift(range);
+}
+
+/*
* hmm_range_wait_until_valid() - wait for range to be valid
* @range: range affected by invalidation to wait on
* @timeout: time out for wait in ms (ie abort wait after that period of time)
@@ -424,7 +445,8 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
int hmm_range_register(struct hmm_range *range,
struct mm_struct *mm,
unsigned long start,
- unsigned long end);
+ unsigned long end,
+ unsigned page_shift);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
@@ -462,7 +484,8 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
range->pfn_flags_mask = -1UL;
ret = hmm_range_register(range, range->vma->vm_mm,
- range->start, range->end);
+ range->start, range->end,
+ PAGE_SHIFT);
if (ret)
return (int)ret;
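After hmm_range_snapshot() or hmm_range_fault() succeeds, a driver walks range->pfns with one entry per range page, stepping by hmm_range_page_size() so that hugetlbfs ranges advance by the huge page size. A hedged sketch, assuming the pfns/values layout of this era of the API; my_map_one_entry() is a made-up placeholder for the driver's DMA-mapping step:

/* Hypothetical driver-side mapping step. */
static void my_map_one_entry(struct hmm_range *range, unsigned long addr,
			     uint64_t entry);

/* Hypothetical walk over a snapshotted range: one pfns[] entry per range
 * page (huge page sized for ranges registered with a larger page_shift).
 */
static void my_driver_walk_range(struct hmm_range *range)
{
	unsigned long page_size = hmm_range_page_size(range);
	unsigned long addr, i;

	for (addr = range->start, i = 0; addr < range->end;
	     addr += page_size, i++) {
		uint64_t entry = range->pfns[i];

		if (entry == range->values[HMM_PFN_NONE])
			continue;	/* hole, nothing to mirror */

		/* my_map_one_entry() stands in for driver-specific mapping */
		my_map_one_entry(range, addr, entry);
	}
}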