author		James Clark <james.clark@arm.com>	2020-11-26 16:13:18 +0200
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2020-12-24 10:03:54 -0300
commit		91585846f105ef2e3f479a5124a264ebb770f6ab (patch)
tree		f4cab0658779b531343f87cb1a6d47b46c8d724b /tools
parent		23331eeb731a503aaa74d167055eeedc2073ff09 (diff)
perf cpumap: Use existing allocator to avoid using malloc
Use the existing allocator for perf_cpu_map to avoid use of raw malloc. A raw
malloc could cause an issue in later commits where the size of perf_cpu_map
is changed.

No functional changes.

Signed-off-by: James Clark <james.clark@arm.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: John Garry <john.garry@huawei.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Link: https://lore.kernel.org/r/20201126141328.6509-3-james.clark@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
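For context, here is a minimal sketch of what the existing allocator is assumed to look like; it is not quoted verbatim from tools/perf, and the exact body may differ in this tree. The point is that perf_cpu_map__empty_new() already sizes the trailing map[] array, marks every slot unset and takes the initial reference, which is why the open-coded calloc() and refcount_set() in the diff below become redundant:

struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	/* One allocation covers the header plus the flexible map[] array. */
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;	/* -1 == slot not assigned yet */

		/* The caller owns the first reference. */
		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}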
Diffstat (limited to 'tools')
-rw-r--r--	tools/perf/util/cpumap.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index dc5c5e6fc502..20e3a75953fc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -132,15 +132,16 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 		       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
 		       void *data)
 {
-	struct perf_cpu_map *c;
 	int nr = cpus->nr;
+	struct perf_cpu_map *c = perf_cpu_map__empty_new(nr);
 	int cpu, s1, s2;
 
-	/* allocate as much as possible */
-	c = calloc(1, sizeof(*c) + nr * sizeof(int));
 	if (!c)
 		return -1;
 
+	/* Reset size as it may only be partially filled */
+	c->nr = 0;
+
 	for (cpu = 0; cpu < nr; cpu++) {
 		s1 = f(cpus, cpu, data);
 		for (s2 = 0; s2 < c->nr; s2++) {
@@ -155,7 +156,6 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 
 	/* ensure we process id in increasing order */
 	qsort(c->map, c->nr, sizeof(int), cmp_ids);
-	refcount_set(&c->refcnt, 1);
 	*res = c;
 	return 0;
 }
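For reference, cpu_map__build_map() is driven by per-cpu aggregation callbacks; a typical caller in the same file (sketched from memory, so treat the exact names as assumptions rather than a quote of this tree) builds a socket map like this:

/* Build the map of sockets covered by 'cpus'; *sockp is allocated inside. */
int cpu_map__build_socket_map(struct perf_cpu_map *cpus, struct perf_cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

After this patch the map handed back via *sockp comes from perf_cpu_map__empty_new(), so it is already reference-counted and sized by the common allocator instead of by an open-coded calloc().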