| | | |
|---|---|---|
| author | Ian Rogers <irogers@google.com> | 2023-05-26 20:43:19 -0700 |
| committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2023-06-12 18:18:14 -0300 |
| commit | 232418a0b2e8b8e72dac003b19352f1b647cdb31 (patch) | |
| tree | 34ee18156043df2c628632fd2aaaf5af45b5141d /tools/perf/builtin-sched.c | |
| parent | e57d739334d55688bfbf161b1501426467d02c86 (diff) | |
perf sched: Avoid large stack allocations
Commit 5ded57ac1bdb ("perf inject: Remove static variables") moved
static variables into local scope; however, in this case three
MAX_CPUS-sized (4096-entry) arrays ended up on the stack, making the
stack frame quite large. Avoid the stack usage by dynamically
allocating the arrays.
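The change follows a common pattern: replace fixed-size per-CPU arrays embedded in a structure with pointers that are heap-allocated at startup and freed on exit. Below is a minimal, self-contained sketch of that pattern, not the perf code itself; `struct sched_state` and the `sched_state_init()`/`sched_state_exit()` helpers are invented for illustration, `void **` stands in for `struct thread **`, and MAX_CPUS matches the 4096 mentioned above.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 4096

/* Simplified stand-in for struct perf_sched: the per-CPU bookkeeping is
 * reached through pointers rather than MAX_CPUS-sized arrays, so the
 * structure itself stays small even when it lives on the stack. */
struct sched_state {
	uint32_t  *curr_pid;
	void     **curr_thread;        /* stand-in for struct thread ** */
	uint64_t  *cpu_last_switched;
};

static int sched_state_init(struct sched_state *s)
{
	/* calloc() zero-fills, which is the wanted initial state here. */
	s->curr_thread = calloc(MAX_CPUS, sizeof(*s->curr_thread));
	s->cpu_last_switched = calloc(MAX_CPUS, sizeof(*s->cpu_last_switched));
	/* curr_pid wants -1, not 0, so plain malloc() plus an explicit fill. */
	s->curr_pid = malloc(MAX_CPUS * sizeof(*s->curr_pid));
	if (!s->curr_thread || !s->cpu_last_switched || !s->curr_pid)
		return -1;             /* stand-in for -ENOMEM */

	for (int i = 0; i < MAX_CPUS; i++)
		s->curr_pid[i] = (uint32_t)-1;
	return 0;
}

static void sched_state_exit(struct sched_state *s)
{
	/* free(NULL) is a no-op, so partial initialization unwinds safely. */
	free(s->curr_pid);
	free(s->cpu_last_switched);
	free(s->curr_thread);
}

int main(void)
{
	struct sched_state sched = { 0 };

	if (sched_state_init(&sched)) {
		fprintf(stderr, "failed to allocate per-CPU arrays\n");
		sched_state_exit(&sched);
		return 1;
	}
	printf("allocated per-CPU state for %d CPUs\n", MAX_CPUS);
	sched_state_exit(&sched);
	return 0;
}
```

As in the patch below, the two pointer/counter arrays come from calloc() so they start zeroed, curr_pid is malloc()ed and filled with -1 by hand, and because free(NULL) is a no-op a single cleanup path can release whatever was allocated.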
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230527034324.2597593-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r-- | tools/perf/builtin-sched.c | 26 |
1 file changed, 22 insertions, 4 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index c9ddf73689cd..9ab300b6f131 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -193,8 +193,8 @@ struct perf_sched {
 	 * weird events, such as a task being switched away that is not current.
 	 */
 	struct perf_cpu max_cpu;
-	u32 curr_pid[MAX_CPUS];
-	struct thread *curr_thread[MAX_CPUS];
+	u32 *curr_pid;
+	struct thread **curr_thread;
 	char next_shortname1;
 	char next_shortname2;
 	unsigned int replay_repeat;
@@ -224,7 +224,7 @@ struct perf_sched {
 	u64 run_avg;
 	u64 all_runtime;
 	u64 all_count;
-	u64 cpu_last_switched[MAX_CPUS];
+	u64 *cpu_last_switched;
 	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
 	struct list_head sort_list, cmp_pid;
 	bool force;
@@ -3595,7 +3595,22 @@ int cmd_sched(int argc, const char **argv)
 	mutex_init(&sched.start_work_mutex);
 	mutex_init(&sched.work_done_wait_mutex);
 
-	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
+	sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
+	if (!sched.curr_thread) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
+	if (!sched.cpu_last_switched) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
+	if (!sched.curr_pid) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	for (i = 0; i < MAX_CPUS; i++)
 		sched.curr_pid[i] = -1;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
@@ -3664,6 +3679,9 @@ int cmd_sched(int argc, const char **argv)
 	}
 
 out:
+	free(sched.curr_pid);
+	free(sched.cpu_last_switched);
+	free(sched.curr_thread);
 	mutex_destroy(&sched.start_work_mutex);
 	mutex_destroy(&sched.work_done_wait_mutex);