summary refs log tree commit diff stats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
author: Peter Zijlstra <a.p.zijlstra@chello.nl> 2010-10-19 14:55:33 +0200
committer: Ingo Molnar <mingo@elte.hu> 2010-10-22 14:18:26 +0200
commit: 96681fc3c9e7d1f89ab64e5eec40b6467c97680f (patch)
tree: 2c161656d3f1641c4c2156652440960e3f866cf2 /arch/x86/kernel
parent: f80c9e304b8e8062230b0cda2c2fdd586149c771 (diff)
download: linux-96681fc3c9e7d1f89ab64e5eec40b6467c97680f.tar.gz
linux-96681fc3c9e7d1f89ab64e5eec40b6467c97680f.tar.bz2
linux-96681fc3c9e7d1f89ab64e5eec40b6467c97680f.zip
perf, x86: Use NUMA aware allocations for PEBS/BTS/DS allocations
For performance reasons it's best to use memory node local memory for per-cpu buffers. This logic comes from a much larger patch proposed by Stephane. Suggested-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Stephane Eranian <eranian@google.com> LKML-Reference: <20101019134808.514465326@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 9
1 file changed, 6 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 8a7f81cbd617..b7dcd9f2b8a0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -77,13 +77,14 @@ static void fini_debug_store_on_cpu(int cpu)
static int alloc_pebs_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
int max, thresh = 1; /* always use a single PEBS record */
void *buffer;
if (!x86_pmu.pebs)
return 0;
- buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+ buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -114,13 +115,14 @@ static void release_pebs_buffer(int cpu)
static int alloc_bts_buffer(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+ int node = cpu_to_node(cpu);
int max, thresh;
void *buffer;
if (!x86_pmu.bts)
return 0;
- buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+ buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -150,9 +152,10 @@ static void release_bts_buffer(int cpu)
static int alloc_ds_buffer(int cpu)
{
+ int node = cpu_to_node(cpu);
struct debug_store *ds;
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
if (unlikely(!ds))
return -ENOMEM;