author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-09 12:04:09 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-09 12:04:09 -0800
commit     ca21b9b37059ee07176028de415cc4699db259cb (patch)
tree       a52257ef2593ad0fe8168a09c3b5131a5fd48808 /kernel/events
parent     2fbc23c738350f1a47007da7ad92ae2e4ea63951 (diff)
parent     45f035748b2aa29840fec6ba01cd8e44c63034c2 (diff)
Merge tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
"A set of fixes and improvements for the perf subsystem:
Kernel fixes:
- Install cgroup events to the correct CPU context to prevent a
potential list double add
- Prevent an integer underflow in the perf mlock accounting (see the
arithmetic sketch after the commit list below)
- Add a missing prototype for arch_perf_update_userpage()
Tooling:
- Add a missing unlock in the error path of maps__insert() in perf
maps.
- Fix the build with the latest libbfd
- Fix the perf parser so it does not delete parse event terms; the
deletion caused a regression when using perf with ARM CoreSight,
because the sink configuration went missing.
- Fix the double free in the perf CPU map merging test case
- Add the missing ustring support for the perf probe command"
* tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf maps: Add missing unlock to maps__insert() error case
perf probe: Add ustring support for perf probe command
perf: Make perf able to build with latest libbfd
perf test: Fix test case Merge cpu map
perf parse: Copy string to perf_evsel_config_term
perf parse: Refactor 'struct perf_evsel_config_term'
kernel/events: Add a missing prototype for arch_perf_update_userpage()
perf/cgroups: Install cgroup events to correct cpuctx
perf/core: Fix mlock accounting in perf_mmap()
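
The mlock accounting fix is the one whose arithmetic is least obvious from
the diff alone, so here is a small stand-alone C sketch of it (referenced
from the kernel-fixes list above). The page counts are made up and the
variable names only mirror the kernel's; this is not kernel code, just the
before/after arithmetic of the last hunk in the diff below.

#include <stdio.h>

/*
 * Illustrative user-space sketch of the perf_mmap() accounting fix.
 * Values are invented: the sysctl was lowered after 200 pages had
 * already been charged, so locked_vm now exceeds the limit on its own.
 */
int main(void)
{
    long user_lock_limit = 128;  /* limit after sysctl_perf_event_mlock was lowered */
    long locked_vm       = 200;  /* pages already charged to user->locked_vm */
    long user_extra      = 16;   /* pages requested by this mmap() */
    long user_locked, extra, charged;

    /* Old logic: everything above the limit counts as "extra". */
    user_locked = locked_vm + user_extra;          /* 216 */
    extra       = user_locked - user_lock_limit;   /* 88: more than this mapping even has */
    charged     = user_extra - extra;              /* -72: accounting later underflows */
    printf("old: charged %ld pages to locked_vm\n", charged);

    /* New logic: clamp what is already locked before adding this request,
     * so "extra" can never exceed the mapping's own pages. */
    user_locked = locked_vm;
    if (user_locked > user_lock_limit)
        user_locked = user_lock_limit;             /* 128 */
    user_locked += user_extra;                     /* 144 */
    extra   = user_locked - user_lock_limit;       /* 16 */
    charged = user_extra - extra;                  /* 0 */
    printf("new: charged %ld pages to locked_vm\n", charged);

    return 0;
}

The negative charge in the old arithmetic is the integer underflow the pull
message refers to; with the clamp, extra can never exceed user_extra, so the
amount charged to locked_vm can never go negative.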
Diffstat (limited to 'kernel/events')
-rw-r--r--   kernel/events/core.c | 17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 17f9a4a909eb..e453589da97c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -951,9 +951,9 @@ list_update_cgroup_event(struct perf_event *event,
 
 	/*
 	 * Because cgroup events are always per-cpu events,
-	 * this will always be called from the right CPU.
+	 * @ctx == &cpuctx->ctx.
 	 */
-	cpuctx = __get_cpu_context(ctx);
+	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
 
 	/*
 	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
@@ -979,7 +979,8 @@ list_update_cgroup_event(struct perf_event *event,
 
 	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
 	if (add)
-		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+		list_add(cpuctx_entry,
+			 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
 	else
 		list_del(cpuctx_entry);
 }
@@ -5916,7 +5917,15 @@ accounting:
 	 */
 	user_lock_limit *= num_online_cpus();
 
-	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+	user_locked = atomic_long_read(&user->locked_vm);
+
+	/*
+	 * sysctl_perf_event_mlock may have changed, so that
+	 *     user->locked_vm > user_lock_limit
+	 */
+	if (user_locked > user_lock_limit)
+		user_locked = user_lock_limit;
+	user_locked += user_extra;
 
 	if (user_locked > user_lock_limit) {
 		/*
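
A note on the first two hunks, with a stand-alone sketch of the
container_of() idiom they rely on. A cgroup event is a per-CPU event, so its
perf_event_context is the one embedded in the perf_cpu_context of event->cpu;
that cpuctx can therefore be recovered from the ctx pointer itself rather
than from the context of whichever CPU happens to run the code, and the list
insertion likewise goes on event->cpu's list via per_cpu_ptr() instead of
this_cpu_ptr(). The sketch below demonstrates only the pointer arithmetic;
the struct layouts are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel structs; layouts are illustrative only. */
struct perf_event_context {
    int nr_events;
};

struct perf_cpu_context {
    struct perf_event_context ctx;  /* embedded context, as in the kernel */
    int cpu;
};

/* Minimal container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
    struct perf_cpu_context cpuctx = { .ctx = { .nr_events = 3 }, .cpu = 2 };

    /* All we are handed is a pointer to the embedded context... */
    struct perf_event_context *ctx = &cpuctx.ctx;

    /* ...and container_of() gives back the per-CPU context that owns it. */
    struct perf_cpu_context *owner =
        container_of(ctx, struct perf_cpu_context, ctx);

    printf("owner cpu=%d, nr_events=%d\n", owner->cpu, owner->ctx.nr_events);
    return 0;
}

In this toy layout ctx is the first member, so its offset is zero, but
container_of() subtracts whatever the real offset is, which is why the
recovery in the first hunk works regardless of where the member sits inside
the enclosing struct.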