author		Christoph Hellwig <hch@lst.de>		2020-03-16 20:32:16 +0100
committer	Jason Gunthorpe <jgg@mellanox.com>	2020-03-26 14:33:38 -0300
commit		08ddddda667b3b7aaac10641418283f78118c5cd (patch)
tree		55b987ad250ce79a5e0dc6a0a0d6e91c289dec33 /mm/hmm.c
parent		17ffdc482982af92bddb59692af1c5e1de23d184 (diff)
mm/hmm: check the device private page owner in hmm_range_fault()
hmm_range_fault() will succeed for any kind of device private memory, even
if it doesn't belong to the calling entity. While nouveau has some crude
checks for that, they are broken because they assume nouveau is the only
user of device private memory.

Fix this by passing in an expected pgmap owner in the hmm_range_fault
structure. If a device_private page is found and doesn't match the owner,
it is treated as a non-present and non-faultable page.

This prevents a bug in amdgpu, where it doesn't know how to handle
device_private pages, but hmm_range_fault would return them anyhow.

Fixes: 4ef589dc9b10 ("mm/hmm/devmem: device memory hotplug using ZONE_DEVICE")
Link: https://lore.kernel.org/r/20200316193216.920734-5-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
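As a caller-side illustration (not part of this patch; the driver name
my_drv is hypothetical, and the notifier plus driver-specific flags/values
setup is elided), the new field is set alongside the usual hmm_range
fields and must be the same pointer the driver registered as pgmap->owner:

	struct hmm_range range = {
		.start = start,
		.end   = end,
		.pfns  = pfns,
		/* notifier and driver-specific flags/values/pfn_shift elided */
		.dev_private_owner = my_drv,	/* must match the pgmap->owner
						 * the driver passed to
						 * memremap_pages() */
	};

	ret = hmm_range_fault(&range, 0);	/* second argument carries the
						 * HMM_FAULT_* flags in this
						 * kernel series */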
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--	mm/hmm.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 613b34950c30..a491d9aaafe4 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -218,6 +218,14 @@ int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 		unsigned long end, uint64_t *pfns, pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline bool hmm_is_device_private_entry(struct hmm_range *range,
+		swp_entry_t entry)
+{
+	return is_device_private_entry(entry) &&
+		device_private_entry_to_page(entry)->pgmap->owner ==
+		range->dev_private_owner;
+}
+
 static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
 {
 	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
@@ -256,7 +264,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	 * Never fault in device private pages, but just report
 	 * the PFN even if not present.
 	 */
-	if (is_device_private_entry(entry)) {
+	if (hmm_is_device_private_entry(range, entry)) {
 		*pfn = hmm_device_entry_from_pfn(range,
 				    swp_offset(entry));
 		*pfn |= range->flags[HMM_PFN_VALID];
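The value compared against comes from the device side: pgmap->owner is set
by the driver when it registers its device private memory. A minimal
sketch of that registration, assuming hypothetical my_drv names and the
memremap_pages() interface this series builds on:

	struct dev_pagemap *pgmap = &my_drv->pagemap;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->res = *res;			/* e.g. from request_free_mem_region() */
	pgmap->ops = &my_drv_pagemap_ops;	/* .page_free, .migrate_to_ram */
	pgmap->owner = my_drv;			/* what hmm_is_device_private_entry()
						 * compares range->dev_private_owner
						 * against */
	addr = memremap_pages(pgmap, numa_node_id());

Because the match is on an opaque pointer rather than a per-driver flag,
any number of device private memory users can coexist: each
hmm_range_fault() caller sees only its own pages as present, and foreign
device private entries fall through to the normal non-present handling.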