author | Vishal Moola (Oracle) <vishal.moola@gmail.com> | 2024-09-14 12:41:19 -0700
---|---|---
committer | Andrew Morton <akpm@linux-foundation.org> | 2024-09-17 00:58:04 -0700
commit | 98b74bb4d7e96b4da5ef3126511febe55b76b807 |
tree | ad5288b180e5f24c8a98f504fb18eb356b53ef04 /mm |
parent | 2a058ab3286d6475b2082b90c2d2182d2fea4b39 |
mm/hugetlb.c: fix UAF of vma in hugetlb fault pathway
Syzbot reports a UAF in hugetlb_fault(). This happens because
vmf_anon_prepare() could drop the per-VMA lock and allow the current VMA
to be freed before hugetlb_vma_unlock_read() is called.
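
For illustration, here is a minimal sketch of the racy pre-fix flow. The helper name `buggy_flow()` is hypothetical and the control flow is condensed from the hunks below; it is not the literal upstream code:

```c
#include <linux/hugetlb.h>
#include <linux/mm.h>

/*
 * Hypothetical condensation of the pre-fix exit path in
 * hugetlb_no_page(); buggy_flow() is an illustrative name only.
 */
static vm_fault_t buggy_flow(struct vm_fault *vmf, struct vm_area_struct *vma)
{
	vm_fault_t ret;

	/* Old behavior: on VM_FAULT_RETRY this already called vma_end_read(). */
	ret = vmf_anon_prepare(vmf);
	if (unlikely(ret))
		goto out;

	/* ... normal fault handling elided ... */
out:
	/*
	 * If the per-VMA lock was dropped above, a racing unmap can free the
	 * VMA before this line runs: the reported use-after-free.
	 */
	hugetlb_vma_unlock_read(vma);
	return ret;
}
```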
We can fix this by using a modified version of vmf_anon_prepare() that does not
release the per-VMA lock on failure, and then releasing the lock ourselves
after hugetlb_vma_unlock_read().
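
For context, the companion refactor in the parent commit (2a058ab3286d) is what makes this possible: __vmf_anon_prepare() prepares the anon_vma without touching the per-VMA lock, while the vmf_anon_prepare() wrapper keeps the old drop-on-retry behavior for every other caller. A minimal sketch of that split, assuming the upstream shape of the wrapper:

```c
/*
 * Sketch of the helper/wrapper split from the parent commit: only the
 * wrapper releases the per-VMA lock, so callers such as the hugetlb
 * fault path can call __vmf_anon_prepare() and manage the lock
 * themselves.
 */
vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}
```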
Link: https://lkml.kernel.org/r/20240914194243.245-2-vishal.moola@gmail.com
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb.c | 20
1 file changed, 18 insertions, 2 deletions
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaf508be0a2b..9a3a6e2dee97 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6048,7 +6048,7 @@ retry_avoidcopy:
 	 * When the original hugepage is shared one, it does not have
 	 * anon_vma prepared.
 	 */
-	ret = vmf_anon_prepare(vmf);
+	ret = __vmf_anon_prepare(vmf);
 	if (unlikely(ret))
 		goto out_release_all;
 
@@ -6247,7 +6247,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	}
 
 	if (!(vma->vm_flags & VM_MAYSHARE)) {
-		ret = vmf_anon_prepare(vmf);
+		ret = __vmf_anon_prepare(vmf);
 		if (unlikely(ret))
 			goto out;
 	}
@@ -6378,6 +6378,14 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	folio_unlock(folio);
 out:
 	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
+	 * the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	return ret;
 
@@ -6599,6 +6607,14 @@ out_ptl:
 }
 out_mutex:
 	hugetlb_vma_unlock_read(vma);
+
+	/*
+	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
+	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
+	 */
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vma);
+
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
```