author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-03 16:15:39 -0500
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-24 20:51:49 -0400
commit		42c269c88dc146982a54a8267f71abc99f12852a (patch)
tree		2fecaaec16fe65febc290be5d67b396d0d0f75e3 /kernel/trace
parent		dbeafd0d6131d0f6ae8cd7551f5f4bf8c54aa49a (diff)
ftrace: Allow for function tracing to record init functions on boot up
Adding a hook into free_reserved_area() that informs ftrace that boot-up init text is being freed lets ftrace safely remove those init functions from its records, which keeps ftrace from trying to modify text that no longer exists.

Note, this still does not allow for tracing .init text of modules, as modules require different work for freeing their init code.

Link: http://lkml.kernel.org/r/1488502497.7212.24.camel@linux.intel.com

Cc: linux-mm@kvack.org
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Requested-by: Todd Brandt <todd.e.brandt@linux.intel.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
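The caller side of the hook lives in include/linux/ftrace.h and mm/page_alloc.c, which the path-limited view below filters out. A minimal sketch of those out-of-view pieces, assuming the usual CONFIG_DYNAMIC_FTRACE stub pattern (the exact upstream hunks may differ slightly from this):

	/* include/linux/ftrace.h -- sketch, not the verbatim hunk */
	#ifdef CONFIG_DYNAMIC_FTRACE
	void ftrace_free_mem(void *start, void *end);
	#else
	static inline void ftrace_free_mem(void *start, void *end) { }
	#endif

	/* mm/page_alloc.c -- free_reserved_area() gains a call along these lines: */
	/* This may be .init text; tell ftrace to drop its records for it */
	ftrace_free_mem(start, end);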
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b9691ee8f6c1..0556a202c055 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5262,6 +5262,50 @@ void ftrace_module_init(struct module *mod)
 }
 #endif /* CONFIG_MODULES */
 
+void ftrace_free_mem(void *start_ptr, void *end_ptr)
+{
+	unsigned long start = (unsigned long)start_ptr;
+	unsigned long end = (unsigned long)end_ptr;
+	struct ftrace_page **last_pg = &ftrace_pages_start;
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec;
+	struct dyn_ftrace key;
+	int order;
+
+	key.ip = start;
+	key.flags = end;	/* overload flags, as it is unsigned long */
+
+	mutex_lock(&ftrace_lock);
+
+	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
+		if (end < pg->records[0].ip ||
+		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
+			continue;
+ again:
+		rec = bsearch(&key, pg->records, pg->index,
+			      sizeof(struct dyn_ftrace),
+			      ftrace_cmp_recs);
+		if (!rec)
+			continue;
+		pg->index--;
+		if (!pg->index) {
+			*last_pg = pg->next;
+			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+			free_pages((unsigned long)pg->records, order);
+			kfree(pg);
+			pg = container_of(last_pg, struct ftrace_page, next);
+			if (!(*last_pg))
+				ftrace_pages = pg;
+			continue;
+		}
+		memmove(rec, rec + 1,
+			(pg->index - (rec - pg->records)) * sizeof(*rec));
+		/* More than one function may be in this block */
+		goto again;
+	}
+	mutex_unlock(&ftrace_lock);
+}
+
 void __init ftrace_init(void)
 {
 	extern unsigned long __start_mcount_loc[];
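The removal loop pairs bsearch() over the sorted dyn_ftrace records with memmove() compaction: key.flags is overloaded as the end of the address range so a single key matches any record inside it, and the "goto again" re-runs the search because several functions from the freed region can sit in one page. The following self-contained userspace sketch shows the same range-keyed search-and-compact pattern; all names and the half-open-range convention are hypothetical illustration, not kernel code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct rec {
		unsigned long ip;	/* sorted key */
		unsigned long flags;	/* overloaded as range end in the key */
	};

	/* Matches if rec->ip falls in the half-open range [key->ip, key->flags) */
	static int cmp_recs(const void *a, const void *b)
	{
		const struct rec *key = a;
		const struct rec *rec = b;

		if (key->flags <= rec->ip)	/* range ends at or before this record */
			return -1;
		if (key->ip > rec->ip)		/* range starts after this record */
			return 1;
		return 0;
	}

	static size_t remove_range(struct rec *recs, size_t n,
				   unsigned long start, unsigned long end)
	{
		struct rec key = { .ip = start, .flags = end };
		struct rec *hit;

		/* bsearch() finds *a* match in the range; repeat until none remain */
		while ((hit = bsearch(&key, recs, n, sizeof(*recs), cmp_recs))) {
			n--;
			memmove(hit, hit + 1, (n - (hit - recs)) * sizeof(*recs));
		}
		return n;
	}

	int main(void)
	{
		struct rec recs[] = {
			{ 0x100 }, { 0x200 }, { 0x300 }, { 0x400 }, { 0x500 },
		};
		size_t n = sizeof(recs) / sizeof(recs[0]);

		n = remove_range(recs, n, 0x200, 0x400);	/* drops 0x200, 0x300 */
		for (size_t i = 0; i < n; i++)
			printf("0x%lx\n", recs[i].ip);
		return 0;
	}

The kernel version additionally frees a whole ftrace_page when its last record goes away; the sketch keeps only the search-and-compact core.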