path: root/tools/perf
author	David S. Miller <davem@davemloft.net>	2010-02-26 12:08:34 -0300
committer	Ingo Molnar <mingo@elte.hu>	2010-02-26 16:28:45 +0100
commit	4385d580f2278abab6d336e52522e9a6f5452a11 (patch)
tree	ab35d78343741a1130b779cd1d98f5ac0041ac37 /tools/perf
parent	f22f54f4491acd987a6c5a92de52b60ca8b58b61 (diff)
perf tools: Flush maps on COMM events
Even though we don't register the counters until the child is right about
to exec(), we're still going to get at least a few events while the
fork()'d child is still executing 'perf', and in particular we're going to
get the MMAP events. We can't distinguish the ones in the newly executed
process because the PID will be the same.

One way to solve this would be to have a PERF_RECORD_EXEC event, and when
this is seen 'perf' can flush its map cache. We can't use PERF_RECORD_COMM
since that's generated by other things, not just exec().

Actually, thinking about it some more, using PERF_RECORD_COMM might be a
good enough approximation.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1267196914-16238-1-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
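As a rough illustration of the idea in the message above (not the actual
perf-tools code), here is a minimal event dispatcher that treats
PERF_RECORD_COMM as an exec() approximation and flushes the per-thread map
cache before recording the new comm. dispatch(), flush_thread_maps() and
record_mmap() are hypothetical stand-ins; only struct perf_event_header and
the PERF_RECORD_* constants come from <linux/perf_event.h>:

/* Hypothetical sketch, not the perf-tools implementation: treat a COMM
 * event as "the address space may have been replaced by exec()". */
#include <linux/perf_event.h>
#include <stdio.h>

static void flush_thread_maps(int pid)
{
	/* Stand-in for dropping the cached MMAP ranges of this task. */
	printf("flushing cached maps of pid %d\n", pid);
}

static void record_mmap(int pid)
{
	/* Stand-in for storing a new mapping; before the exec() this may
	 * still describe the 'perf' binary itself, under the same PID. */
	printf("recording mmap for pid %d\n", pid);
}

static void dispatch(struct perf_event_header *hdr, int pid)
{
	switch (hdr->type) {
	case PERF_RECORD_MMAP:
		record_mmap(pid);
		break;
	case PERF_RECORD_COMM:
		/* COMM also fires on plain comm changes (e.g. prctl), so
		 * flushing here is only an approximation of "saw exec()". */
		flush_thread_maps(pid);
		break;
	}
}

int main(void)
{
	struct perf_event_header comm = { .type = PERF_RECORD_COMM, .size = sizeof(comm) };

	dispatch(&comm, 1234);
	return 0;
}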
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/util/thread.c	34
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index c090654cb6c0..21b92162282b 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -31,15 +31,41 @@ static struct thread *thread__new(pid_t pid)
 	return self;
 }
 
+static void map_groups__flush(struct map_groups *self)
+{
+	int type;
+
+	for (type = 0; type < MAP__NR_TYPES; type++) {
+		struct rb_root *root = &self->maps[type];
+		struct rb_node *next = rb_first(root);
+
+		while (next) {
+			struct map *pos = rb_entry(next, struct map, rb_node);
+			next = rb_next(&pos->rb_node);
+			rb_erase(&pos->rb_node, root);
+			/*
+			 * We may have references to this map, for
+			 * instance in some hist_entry instances, so
+			 * just move them to a separate list.
+			 */
+			list_add_tail(&pos->node, &self->removed_maps[pos->type]);
+		}
+	}
+}
+
 int thread__set_comm(struct thread *self, const char *comm)
 {
+	int err;
+
 	if (self->comm)
 		free(self->comm);
 	self->comm = strdup(comm);
-	if (self->comm == NULL)
-		return -ENOMEM;
-	self->comm_set = true;
-	return 0;
+	err = self->comm == NULL ? -ENOMEM : 0;
+	if (!err) {
+		self->comm_set = true;
+		map_groups__flush(&self->mg);
+	}
+	return err;
 }
 
 int thread__comm_len(struct thread *self)
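A design note on map_groups__flush() above: maps are moved to removed_maps
instead of being freed, because hist_entry instances may still hold pointers
to them. The self-contained sketch below (plain C with a simple intrusive
list instead of the kernel rb-tree and list helpers; all names are
illustrative, not perf code) shows the same pattern of parking flushed
objects on a "removed" list so outstanding references stay valid:

#include <stdio.h>

/* Illustrative stand-in for 'struct map': live maps sit on 'maps',
 * flushed maps are parked on 'removed' so outstanding pointers stay valid. */
struct map {
	unsigned long start, end;
	struct map *next;
};

struct map_group {
	struct map *maps;     /* current address-space view            */
	struct map *removed;  /* flushed, but possibly still referenced */
};

/* Move every live map onto the removed list; mirrors the idea behind
 * map_groups__flush(): don't free, other objects may still use the maps. */
static void map_group_flush(struct map_group *g)
{
	while (g->maps) {
		struct map *pos = g->maps;

		g->maps = pos->next;      /* advance before relinking    */
		pos->next = g->removed;   /* park it on the removed list */
		g->removed = pos;
	}
}

int main(void)
{
	struct map_group g = { NULL, NULL };
	struct map a = { 0x1000, 0x2000, NULL };
	struct map b = { 0x3000, 0x4000, NULL };

	a.next = &b;
	g.maps = &a;

	struct map *held = &b;        /* a hist_entry-style reference */
	map_group_flush(&g);

	/* 'held' is still valid after the flush because nothing was freed. */
	printf("held map: %#lx-%#lx\n", held->start, held->end);
	return 0;
}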