author     Greg Thelen <gthelen@google.com>    2021-11-10 18:18:14 -0800
committer  Peter Zijlstra <peterz@infradead.org>    2021-11-11 13:09:34 +0100
commit     4716023a8f6a0f4a28047f14dd7ebdc319606b84 (patch)
tree       b349726860336a253e7b136e72fc5da581a2fdc5 /kernel/events
parent     5863702561e625903ec678551cb056a4b19e0b8a (diff)
perf/core: Avoid put_page() when GUP fails
PEBS PERF_SAMPLE_PHYS_ADDR events use perf_virt_to_phys() to convert PMU
sampled virtual addresses to physical using get_user_page_fast_only() and
page_to_phys().

Some get_user_page_fast_only() error cases return false, indicating no page
reference, but still initialize the output page pointer with an unreferenced
page. In these error cases perf_virt_to_phys() calls put_page(). This causes
page reference count underflow, which can lead to unintentional page sharing.

Fix perf_virt_to_phys() to only put_page() if get_user_page_fast_only()
returns a referenced page.

Fixes: fc7ce9c74c3ad ("perf/core, x86: Add PERF_SAMPLE_PHYS_ADDR")
Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20211111021814.757086-1-gthelen@google.com
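For context, the pre-patch error handling (visible in the removed lines of the diff below) assumed that a non-NULL page pointer implied a held reference. A rough annotated sketch of that flow, assembled from those removed lines, with the problem marked:

        struct page *p = NULL;
        ...
        pagefault_disable();
        if (get_user_page_fast_only(virt, 0, &p))
                phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
        pagefault_enable();
        ...
        /*
         * Some get_user_page_fast_only() failure paths return false yet
         * still store an unreferenced page in p, so this put_page() drops
         * a reference that was never taken and underflows the refcount.
         */
        if (p)
                put_page(p);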
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f2253ea729a2..523106a506ee 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7154,7 +7154,6 @@ void perf_output_sample(struct perf_output_handle *handle,
 static u64 perf_virt_to_phys(u64 virt)
 {
         u64 phys_addr = 0;
-        struct page *p = NULL;
 
         if (!virt)
                 return 0;
@@ -7173,14 +7172,15 @@ static u64 perf_virt_to_phys(u64 virt)
                  * If failed, leave phys_addr as 0.
                  */
                 if (current->mm != NULL) {
+                        struct page *p;
+
                         pagefault_disable();
-                        if (get_user_page_fast_only(virt, 0, &p))
+                        if (get_user_page_fast_only(virt, 0, &p)) {
                                 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
+                                put_page(p);
+                        }
                         pagefault_enable();
                 }
-
-                if (p)
-                        put_page(p);
         }
 
         return phys_addr;
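For reference, the user-address branch of perf_virt_to_phys() as it reads with the patch applied, assembled from the hunks above (the surrounding function body is unchanged and omitted here):

        if (current->mm != NULL) {
                struct page *p;

                pagefault_disable();
                if (get_user_page_fast_only(virt, 0, &p)) {
                        /* The lookup succeeded, so a page reference is held and can safely be dropped. */
                        phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
                        put_page(p);
                }
                pagefault_enable();
        }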