path: root/kernel/events
author	Stephane Eranian <eranian@google.com>	2013-07-05 00:30:11 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-06-11 12:03:27 -0700
commit	a4a108e8d7fba75a2dbfbe0818a3a4dec79d6d57 (patch)
tree	1534c7881409c6c106a70d7c0d4ccc62bc35f461 /kernel/events
parent	3cd49fd7da79541a1e87bfa5750f5a939c6626df (diff)
perf: Fix interrupt handler timing harness
commit e5302920da9ef23f9d19d4e9ac85704cc25bee7a upstream.

This patch fixes a serious bug in:

  14c63f17b1fd perf: Drop sample rate when sampling is too slow

There was a misunderstanding of the API of the do_div() macro: it
returns the remainder of the division, which is not what the function
expected, leading to the interrupt latency watchdog being disabled.

This patch also removes a duplicate assignment in perf_sample_event_took().

Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Cc: dave.hansen@linux.intel.com
Cc: ak@linux.intel.com
Cc: jolsa@redhat.com
Link: http://lkml.kernel.org/r/20130704223010.GA30625@quad
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Weng Meiling <wengmeiling.weng@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
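For context: do_div(n, base) divides the 64-bit variable n in place (n becomes the quotient) and evaluates to the remainder, so assigning its return value back to n stores the remainder instead of the quotient. The following is a minimal userspace sketch of those semantics; the do_div stand-in below is a simplified illustration, not the kernel's asm-optimized macro from asm/div64.h.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's do_div(): divides the 64-bit
 * variable n by base in place (n becomes the quotient) and evaluates
 * to the remainder. Uses a GCC/Clang statement expression, like the
 * generic kernel version. */
#define do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

int main(void)
{
	uint64_t tmp = 250000000ULL;	/* e.g. sample period * max percent */

	/* Buggy pattern from 14c63f17b1fd: tmp ends up holding the
	 * remainder (a value in 0..99), which effectively disabled
	 * the interrupt latency watchdog. */
	uint64_t buggy = tmp;
	buggy = do_div(buggy, 100);

	/* Fixed pattern: discard the remainder, keep the quotient in place. */
	uint64_t fixed = tmp;
	do_div(fixed, 100);

	printf("buggy = %llu, fixed = %llu\n",
	       (unsigned long long)buggy, (unsigned long long)fixed);
	return 0;
}

Compiled with gcc, this prints buggy = 0 and fixed = 2500000, which is why the original code zeroed perf_sample_allowed_ns and turned the watchdog off.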
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c0f6d9b7d78a..bffea4900db1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -182,7 +182,7 @@ void update_perf_cpu_limits(void)
 	u64 tmp = perf_sample_period_ns;
 
 	tmp *= sysctl_perf_cpu_time_max_percent;
-	tmp = do_div(tmp, 100);
+	do_div(tmp, 100);
 	atomic_set(&perf_sample_allowed_ns, tmp);
 }
 
@@ -230,7 +230,7 @@ DEFINE_PER_CPU(u64, running_sample_length);
 void perf_sample_event_took(u64 sample_len_ns)
 {
 	u64 avg_local_sample_len;
-	u64 local_samples_len = __get_cpu_var(running_sample_length);
+	u64 local_samples_len;
 
 	if (atomic_read(&perf_sample_allowed_ns) == 0)
 		return;