author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-11 11:25:05 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-06-11 16:48:38 +0200
commit		df58ab24bf26b166874bfb18b3b5a2e0a8e63179 (patch)
tree		388b2fb9d94864c9bd6d6ab9329c31760b7366ae /kernel/perf_counter.c
parent		0764771dab80d7b84b9a271bee7f1b21a04a3f0c (diff)
perf_counter: Rename perf_counter_limit sysctl
Rename perf_counter_limit to perf_counter_max_sample_rate and
prohibit creation of counters with a known higher sample frequency.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
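The user-visible half of the change is easiest to see from the syscall side. Below is a minimal sketch of a request this patch starts rejecting at creation time; it assumes the raw syscall is reached via syscall(2) and __NR_perf_counter_open (the perf_counter ABI of this era had no libc wrapper), and the PERF_TYPE_*/PERF_COUNT_* enum names track the in-flux header of the time, so treat them as assumptions:

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	int main(void)
	{
		struct perf_counter_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type        = PERF_TYPE_SOFTWARE;
		attr.config      = PERF_COUNT_SW_CPU_CLOCK; /* enum name is an assumption */
		attr.freq        = 1;       /* interpret sample_freq, not sample_period */
		attr.sample_freq = 1000000; /* above the 100000 default cap */

		/* pid 0 = current task, cpu -1 = any, no group fd, no flags */
		if (syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0) < 0)
			printf("open rejected: %s\n", strerror(errno)); /* EINVAL after this patch */
		return 0;
	}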
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 63f1987c1c1c..3b2829de5590 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
/*
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * perf counter paranoia level:
+ * 0 - not paranoid
+ * 1 - disallow cpu counters to unpriv
+ * 2 - disallow kernel profiling to unpriv
*/
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
static inline bool perf_paranoid_cpu(void)
{
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
}
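The bodies of the two paranoia helpers are elided by the diff context. Judging from the level comment above, they compare the sysctl against the caller's request, roughly like this (a sketch of the pre-existing helpers, not part of this patch):

	static inline bool perf_paranoid_cpu(void)
	{
		return sysctl_perf_counter_paranoid > 0;
	}

	static inline bool perf_paranoid_kernel(void)
	{
		return sysctl_perf_counter_paranoid > 1;
	}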
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
static atomic64_t perf_counter_id;
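Only the variable rename is visible in this file; the user-facing name from the patch title lives in the matching ctl_table entry in kernel/sysctl.c, outside this diffstat. Under the era's sysctl conventions that entry would look roughly like this (a sketch, not part of this diff):

	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "perf_counter_max_sample_rate",
		.data		= &sysctl_perf_counter_sample_rate,
		.maxlen		= sizeof(sysctl_perf_counter_sample_rate),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},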
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
if (interrupts == MAX_INTERRUPTS) {
perf_log_throttle(counter, 1);
counter->pmu->unthrottle(counter);
- interrupts = 2*sysctl_perf_counter_limit/HZ;
+ interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
}
if (!counter->attr.freq || !counter->attr.sample_freq)
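Concretely, with the default rate of 100000 and HZ = 1000, a counter coming out of throttle restarts with interrupts = 2 * 100000 / 1000 = 200, i.e. two ticks' worth of the per-tick NMI budget, so the frequency-adjustment math that follows sees an elevated interrupt count and backs the sample period off accordingly.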
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
spin_lock_irq(&ctx->lock);
if (counter->attr.freq) {
- if (value > sysctl_perf_counter_limit) {
+ if (value > sysctl_perf_counter_sample_rate) {
ret = -EINVAL;
goto unlock;
}
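The same cap now also applies when an existing counter's frequency is raised at runtime through the period ioctl that calls perf_counter_period(). A hedged userspace sketch, assuming the era's PERF_COUNTER_IOC_PERIOD ioctl name:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/perf_counter.h>

	/* raise the sampling frequency of an already-open counter fd */
	static int set_sample_freq(int fd, unsigned long long freq)
	{
		/*
		 * For counters opened with attr.freq = 1, the PERIOD ioctl
		 * reinterprets its argument as a frequency; values above
		 * the sysctl cap now fail with EINVAL.
		 */
		if (ioctl(fd, PERF_COUNTER_IOC_PERIOD, &freq) < 0) {
			perror("PERF_COUNTER_IOC_PERIOD");
			return -1;
		}
		return 0;
	}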
@@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
} else {
if (hwc->interrupts != MAX_INTERRUPTS) {
hwc->interrupts++;
- if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+ if (HZ * hwc->interrupts >
+ (u64)sysctl_perf_counter_sample_rate) {
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(counter, 0);
ret = 1;
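Worked through with the defaults (HZ = 1000, cap 100000), this condition fires on the 101st overflow NMI inside a single tick (101 * 1000 = 101000 > 100000): the counter is marked MAX_INTERRUPTS and stays throttled until the perf_ctx_adjust_freq() pass shown earlier unthrottles it on a later tick.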
@@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open,
return -EACCES;
}
+ if (attr.freq) {
+ if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+ return -EINVAL;
+ }
+
/*
* Get the target context (task or percpu):
*/