summary refs log tree commit diff stats
path: root/include/linux/perf_counter.h
diff options
context:
space:
mode:
author Ingo Molnar <mingo@elte.hu> 2008-12-11 08:38:42 +0100
committer Ingo Molnar <mingo@elte.hu> 2008-12-11 15:45:49 +0100
commit 04289bb9891882202d7e961c4c04d2376930e9f9 (patch)
tree13340847915efc809a62bf91b3cd45e0e0416deb /include/linux/perf_counter.h
parent9f66a3810fe0d4100972db84290f3ae4a4d77025 (diff)
downloadlinux-stable-04289bb9891882202d7e961c4c04d2376930e9f9.tar.gz
linux-stable-04289bb9891882202d7e961c4c04d2376930e9f9.tar.bz2
linux-stable-04289bb9891882202d7e961c4c04d2376930e9f9.zip
perf counters: add support for group counters
Impact: add group counters This patch adds the "counter groups" abstraction. Groups of counters behave much like normal 'single' counters, with a few semantic and behavioral extensions on top of that. A counter group is created by creating a new counter with the open() syscall's group-leader group_fd file descriptor parameter pointing to another, already existing counter. Groups of counters are scheduled in and out in one atomic group, and they are also round-robin-scheduled atomically. Counters that are members of a group can also record events with an (atomic) extended timestamp that extends to all members of the group, if the record type is set to PERF_RECORD_GROUP. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--include/linux/perf_counter.h8
1 file changed, 6 insertions, 2 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index a2b4852e2d70..7af7d8965460 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -117,7 +117,10 @@ struct perf_data {
* struct perf_counter - performance counter kernel representation:
*/
struct perf_counter {
- struct list_head list;
+ struct list_head list_entry;
+ struct list_head sibling_list;
+ struct perf_counter *group_leader;
+
int active;
#if BITS_PER_LONG == 64
atomic64_t count;
@@ -158,7 +161,8 @@ struct perf_counter_context {
* Protect the list of counters:
*/
spinlock_t lock;
- struct list_head counters;
+
+ struct list_head counter_list;
int nr_counters;
int nr_active;
struct task_struct *task;