Diffstat (limited to 'tools/perf/util/mmap.c')
 tools/perf/util/mmap.c | 185
 1 file changed, 93 insertions(+), 92 deletions(-)
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 33c5b5495482..a35dc57d5995 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -12,6 +12,7 @@
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
+#include <unistd.h> // sysconf()
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
@@ -20,25 +21,25 @@
#include "event.h"
#include "mmap.h"
#include "../perf.h"
-#include "util.h" /* page_size */
+#include <internal/lib.h> /* page_size */
-size_t perf_mmap__mmap_len(struct perf_mmap *map)
+size_t perf_mmap__mmap_len(struct mmap *map)
{
- return map->mask + 1 + page_size;
+ return map->core.mask + 1 + page_size;
}
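
The mapping is one power-of-two data area plus a single control page holding the kernel's perf_event_mmap_page header; core.mask is the data size minus one. A standalone restatement of the arithmetic (the ring struct below is illustrative, not the perf API):

#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in: 'mask' is the power-of-two data size
 * minus one, as in struct perf_mmap's core.mask. */
struct ring {
	unsigned long mask;
};

static size_t ring_mmap_len(const struct ring *r)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);

	/* data area (mask + 1) plus one control page for the header */
	return r->mask + 1 + page_size;
}

int main(void)
{
	struct ring r = { .mask = (512 * 1024) - 1 };	/* 512 KiB of data */

	printf("total mmap length: %zu bytes\n", ring_mmap_len(&r));
	return 0;
}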
/* When check_messup is true, 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map,
+static union perf_event *perf_mmap__read(struct mmap *map,
u64 *startp, u64 end)
{
- unsigned char *data = map->base + page_size;
+ unsigned char *data = map->core.base + page_size;
union perf_event *event = NULL;
int diff = end - *startp;
if (diff >= (int)sizeof(event->header)) {
size_t size;
- event = (union perf_event *)&data[*startp & map->mask];
+ event = (union perf_event *)&data[*startp & map->core.mask];
size = event->header.size;
if (size < sizeof(event->header) || diff < (int)size)
@@ -48,20 +49,20 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* Event straddles the mmap boundary -- header should always
* be inside due to u64 alignment of output.
*/
- if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
+ if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) {
unsigned int offset = *startp;
unsigned int len = min(sizeof(*event), size), cpy;
- void *dst = map->event_copy;
+ void *dst = map->core.event_copy;
do {
- cpy = min(map->mask + 1 - (offset & map->mask), len);
- memcpy(dst, &data[offset & map->mask], cpy);
+ cpy = min(map->core.mask + 1 - (offset & map->core.mask), len);
+ memcpy(dst, &data[offset & map->core.mask], cpy);
offset += cpy;
dst += cpy;
len -= cpy;
} while (len);
- event = (union perf_event *)map->event_copy;
+ event = (union perf_event *)map->core.event_copy;
}
*startp += size;
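
The do/while above assembles a contiguous event from the two wrapped segments into event_copy. The same chunked copy in isolation, assuming a power-of-two buffer (names are illustrative):

#include <stdint.h>
#include <string.h>

/* Copy 'len' bytes starting at logical offset 'offset' out of a
 * power-of-two ring 'data' (size mask + 1) into 'dst', handling
 * the wrap at the top of the buffer. */
static void ring_copy(void *dst, const unsigned char *data,
		      unsigned long mask, uint64_t offset, size_t len)
{
	unsigned char *out = dst;

	while (len) {
		/* bytes left before the buffer wraps */
		size_t cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;
		memcpy(out, &data[offset & mask], cpy);
		offset += cpy;
		out += cpy;
		len -= cpy;
	}
}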
@@ -82,55 +83,55 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
* }
* perf_mmap__read_done()
*/
-union perf_event *perf_mmap__read_event(struct perf_mmap *map)
+union perf_event *perf_mmap__read_event(struct mmap *map)
{
union perf_event *event;
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return NULL;
/* non-overwrite doesn't pause the ringbuffer */
- if (!map->overwrite)
- map->end = perf_mmap__read_head(map);
+ if (!map->core.overwrite)
+ map->core.end = perf_mmap__read_head(map);
- event = perf_mmap__read(map, &map->start, map->end);
+ event = perf_mmap__read(map, &map->core.start, map->core.end);
- if (!map->overwrite)
- map->prev = map->start;
+ if (!map->core.overwrite)
+ map->core.prev = map->core.start;
return event;
}
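
The comment block above documents the intended calling sequence; spelled out, it is the loop below ('process' stands in for the caller's event handler and is hypothetical):

extern void process(union perf_event *event);	/* caller-supplied */

static void drain_ring(struct mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;		/* e.g. -EAGAIN: not enough data yet */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		process(event);
		perf_mmap__consume(map);	/* advance the tail */
	}

	perf_mmap__read_done(map);	/* required in overwrite mode */
}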
-static bool perf_mmap__empty(struct perf_mmap *map)
+static bool perf_mmap__empty(struct mmap *map)
{
- return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
+ return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base;
}
-void perf_mmap__get(struct perf_mmap *map)
+void perf_mmap__get(struct mmap *map)
{
- refcount_inc(&map->refcnt);
+ refcount_inc(&map->core.refcnt);
}
-void perf_mmap__put(struct perf_mmap *map)
+void perf_mmap__put(struct mmap *map)
{
- BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
+ BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0);
- if (refcount_dec_and_test(&map->refcnt))
+ if (refcount_dec_and_test(&map->core.refcnt))
perf_mmap__munmap(map);
}
-void perf_mmap__consume(struct perf_mmap *map)
+void perf_mmap__consume(struct mmap *map)
{
- if (!map->overwrite) {
- u64 old = map->prev;
+ if (!map->core.overwrite) {
+ u64 old = map->core.prev;
perf_mmap__write_tail(map, old);
}
- if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
+ if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map))
perf_mmap__put(map);
}
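
perf_mmap__consume() publishes the new tail so the kernel may reuse the consumed space. For reference, the head/tail accessors used here boil down to acquire/release operations on the control page; a condensed illustration (perf carries its own barrier helpers under tools/, so treat this as the idea, not the tree's code):

#include <stdint.h>
#include <linux/perf_event.h>

static uint64_t ring_read_head(void *base)
{
	struct perf_event_mmap_page *pc = base;

	/* acquire: order subsequent data reads after the head load */
	return __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
}

static void ring_write_tail(void *base, uint64_t tail)
{
	struct perf_event_mmap_page *pc = base;

	/* release: all our data reads complete before the tail store */
	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
}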
@@ -161,13 +162,13 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
-static int perf_mmap__aio_enabled(struct perf_mmap *map)
+static int perf_mmap__aio_enabled(struct mmap *map)
{
return map->aio.nr_cblocks > 0;
}
#ifdef HAVE_LIBNUMA_SUPPORT
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
@@ -179,7 +180,7 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
if (map->aio.data[idx]) {
munmap(map->aio.data[idx], perf_mmap__mmap_len(map));
@@ -187,7 +188,7 @@ static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
}
}
-static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity)
+static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
void *data;
size_t mmap_len;
@@ -207,7 +208,7 @@ static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affi
return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
-static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
map->aio.data[idx] = malloc(perf_mmap__mmap_len(map));
if (map->aio.data[idx] == NULL)
@@ -216,19 +217,19 @@ static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx)
return 0;
}
-static void perf_mmap__aio_free(struct perf_mmap *map, int idx)
+static void perf_mmap__aio_free(struct mmap *map, int idx)
{
zfree(&(map->aio.data[idx]));
}
-static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused,
+static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
int cpu __maybe_unused, int affinity __maybe_unused)
{
return 0;
}
#endif
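
For context, the NUMA path of perf_mmap__aio_bind() (whose body falls mostly outside these hunks) binds each AIO data buffer to the node owning the mmap's CPU. A condensed, assumption-laden sketch of that mbind(2) call; the real code derives the mask from the CPU and checks errors per call:

#include <numaif.h>
#include <stddef.h>

/* Bind 'len' bytes at 'data' to one NUMA node (node < word size
 * assumed here for brevity). Illustrative only. */
static int bind_to_node(void *data, size_t len, int node)
{
	unsigned long node_mask = 1UL << node;

	/* MPOL_BIND: back these pages only from the given node */
	return mbind(data, len, MPOL_BIND, &node_mask,
		     sizeof(node_mask) * 8, 0);
}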
-static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
+static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
int delta_max, i, prio, ret;
@@ -256,7 +257,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
- ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
if (ret == -1)
return -1;
/*
@@ -282,7 +283,7 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
return 0;
}
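
The control blocks allocated here feed POSIX AIO writes issued later from the record code. The essential shape of queueing one block, as a hedged sketch (field usage only, not perf's actual helper):

#include <aio.h>
#include <string.h>

static int start_async_write(struct aiocb *cb, int fd,
			     void *buf, size_t size, int prio)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes  = fd;
	cb->aio_buf     = buf;
	cb->aio_nbytes  = size;
	cb->aio_reqprio = prio;		/* relative priority, cf. 'prio' above */
	return aio_write(cb);		/* 0 if queued, -1 on error */
}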
-static void perf_mmap__aio_munmap(struct perf_mmap *map)
+static void perf_mmap__aio_munmap(struct mmap *map)
{
int i;
@@ -294,34 +295,34 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
-static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused)
+static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
return 0;
}
-static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
+static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
{
return 0;
}
-static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
+static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
-void perf_mmap__munmap(struct perf_mmap *map)
+void perf_mmap__munmap(struct mmap *map)
{
perf_mmap__aio_munmap(map);
if (map->data != NULL) {
munmap(map->data, perf_mmap__mmap_len(map));
map->data = NULL;
}
- if (map->base != NULL) {
- munmap(map->base, perf_mmap__mmap_len(map));
- map->base = NULL;
- map->fd = -1;
- refcount_set(&map->refcnt, 0);
+ if (map->core.base != NULL) {
+ munmap(map->core.base, perf_mmap__mmap_len(map));
+ map->core.base = NULL;
+ map->core.fd = -1;
+ refcount_set(&map->core.refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
@@ -343,16 +344,16 @@ static void build_node_mask(int node, cpu_set_t *mask)
}
}
-static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
CPU_ZERO(&map->affinity_mask);
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
- build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask);
+ build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
else if (mp->affinity == PERF_AFFINITY_CPU)
- CPU_SET(map->cpu, &map->affinity_mask);
+ CPU_SET(map->core.cpu, &map->affinity_mask);
}
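
The mask built here is applied later, in the record code, to migrate the reading thread next to the ring. One common way to apply such a cpu_set_t, shown for illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>

/* Pin the calling thread to one CPU -- the effect a mask like
 * map->affinity_mask is built for. Illustrative. */
static int pin_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	return pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
}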
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
+int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
* The last one will be done at perf_mmap__consume(), so that we
@@ -367,23 +368,23 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
- refcount_set(&map->refcnt, 2);
- map->prev = 0;
- map->mask = mp->mask;
- map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+ refcount_set(&map->core.refcnt, 2);
+ map->core.prev = 0;
+ map->core.mask = mp->mask;
+ map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
MAP_SHARED, fd, 0);
- if (map->base == MAP_FAILED) {
+ if (map->core.base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
errno);
- map->base = NULL;
+ map->core.base = NULL;
return -1;
}
- map->fd = fd;
- map->cpu = cpu;
+ map->core.fd = fd;
+ map->core.cpu = cpu;
perf_mmap__setup_affinity_mask(map, mp);
- map->flush = mp->flush;
+ map->core.flush = mp->flush;
map->comp_level = mp->comp_level;
@@ -399,7 +400,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
}
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
- &mp->auxtrace_mp, map->base, fd))
+ &mp->auxtrace_mp, map->core.base, fd))
return -1;
return perf_mmap__aio_mmap(map, mp);
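
Stripped of perf's bookkeeping, the mapping above is the canonical perf ring-buffer mmap: 2^n data pages plus one leading control page over a perf event fd. A minimal sketch ('fd' would come from perf_event_open(2)):

#include <sys/mman.h>
#include <unistd.h>

/* Map a perf ring of 'data_pages' (must be a power of two)
 * plus the control page. Returns MAP_FAILED on error. */
static void *ring_map(int fd, size_t data_pages)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = (data_pages + 1) * page;	/* +1 control page */

	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}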
@@ -440,25 +441,25 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
/*
* Report the start and end of the available data in ringbuffer
*/
-static int __perf_mmap__read_init(struct perf_mmap *md)
+static int __perf_mmap__read_init(struct mmap *md)
{
u64 head = perf_mmap__read_head(md);
- u64 old = md->prev;
- unsigned char *data = md->base + page_size;
+ u64 old = md->core.prev;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
- md->start = md->overwrite ? head : old;
- md->end = md->overwrite ? old : head;
+ md->core.start = md->core.overwrite ? head : old;
+ md->core.end = md->core.overwrite ? old : head;
- if ((md->end - md->start) < md->flush)
+ if ((md->core.end - md->core.start) < md->core.flush)
return -EAGAIN;
- size = md->end - md->start;
- if (size > (unsigned long)(md->mask) + 1) {
- if (!md->overwrite) {
+ size = md->core.end - md->core.start;
+ if (size > (unsigned long)(md->core.mask) + 1) {
+ if (!md->core.overwrite) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
return -EAGAIN;
}
@@ -467,29 +468,29 @@ static int __perf_mmap__read_init(struct perf_mmap *md)
* Backward ring buffer is full. We still have a chance to read
* most of data from it.
*/
- if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+ if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
return -EINVAL;
}
return 0;
}
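
The start/end selection above is the entire difference between the two directions: a forward ring is read from the old tail up to head, an overwrite (backward) ring from head back to the previous position. Restated on its own (illustrative names):

#include <stdint.h>

/* Pick the readable window [start, end) the way
 * __perf_mmap__read_init() does, for both ring directions. */
static void pick_window(int overwrite, uint64_t head, uint64_t old,
			uint64_t *start, uint64_t *end)
{
	*start = overwrite ? head : old;	/* overwrite: newest data first */
	*end   = overwrite ? old  : head;	/* forward: everything up to head */
}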
-int perf_mmap__read_init(struct perf_mmap *map)
+int perf_mmap__read_init(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return -ENOENT;
return __perf_mmap__read_init(map);
}
-int perf_mmap__push(struct perf_mmap *md, void *to,
- int push(struct perf_mmap *map, void *to, void *buf, size_t size))
+int perf_mmap__push(struct mmap *md, void *to,
+ int push(struct mmap *map, void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
- unsigned char *data = md->base + page_size;
+ unsigned char *data = md->core.base + page_size;
unsigned long size;
void *buf;
int rc = 0;
@@ -498,12 +499,12 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
if (rc < 0)
return (rc == -EAGAIN) ? 1 : -1;
- size = md->end - md->start;
+ size = md->core.end - md->core.start;
- if ((md->start & md->mask) + size != (md->end & md->mask)) {
- buf = &data[md->start & md->mask];
- size = md->mask + 1 - (md->start & md->mask);
- md->start += size;
+ if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.mask + 1 - (md->core.start & md->core.mask);
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
@@ -511,16 +512,16 @@ int perf_mmap__push(struct perf_mmap *md, void *to,
}
}
- buf = &data[md->start & md->mask];
- size = md->end - md->start;
- md->start += size;
+ buf = &data[md->core.start & md->core.mask];
+ size = md->core.end - md->core.start;
+ md->core.start += size;
if (push(md, to, buf, size) < 0) {
rc = -1;
goto out;
}
- md->prev = head;
+ md->core.prev = head;
perf_mmap__consume(md);
out:
return rc;
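
When the window wraps past the top of the buffer, the code above splits it into at most two contiguous pushes. The same split, isolated (push() and its ring are illustrative):

#include <stdint.h>
#include <stddef.h>

static int push_window(unsigned char *data, unsigned long mask,
		       uint64_t start, uint64_t end,
		       int (*push)(void *buf, size_t size))
{
	size_t size = end - start;

	if ((start & mask) + size != (end & mask)) {
		/* first segment: from start to the top of the buffer */
		size_t first = mask + 1 - (start & mask);

		if (push(&data[start & mask], first) < 0)
			return -1;
		start += first;
	}
	/* remaining (or only) segment is contiguous */
	return push(&data[start & mask], end - start);
}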
@@ -529,16 +530,16 @@ out:
/*
* Mandatory for overwrite mode
* The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->prev.
- * Need to correct the map->prev to head which is the end of next read.
+ * The last perf_mmap__read() will set tail to map->core.prev.
+ * Need to correct the map->core.prev to head which is the end of next read.
*/
-void perf_mmap__read_done(struct perf_mmap *map)
+void perf_mmap__read_done(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!refcount_read(&map->refcnt))
+ if (!refcount_read(&map->core.refcnt))
return;
- map->prev = perf_mmap__read_head(map);
+ map->core.prev = perf_mmap__read_head(map);
}