path: root/kernel
author	Douglas Anderson <dianders@chromium.org>	2019-03-19 10:12:06 -0700
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2019-05-02 21:32:55 -0400
commit	03197fc02b356606355d7ede343b18e3e3737771 (patch)
tree	5641f5a8bc09f64d7c3c39d0d74e76a55a176107 /kernel
parent	ecffc8a8c7301f6f3c731ba23e38cd049a046416 (diff)
tracing: kdb: Allow ftdump to skip all but the last few entries
The 'ftdump' command in kdb is currently a bit of a last resort, at least if you have lots of traces turned on.  It's going to print a whole boatload of data out your serial port, which is probably running at 115200.  This could easily take many, many minutes.

Usually you're most interested in what's at the _end_ of the ftrace buffer, AKA what happened most recently.  That means you've got to wait the full time for the dump.  The 'ftdump' command does attempt to help you a little bit by allowing you to skip a fixed number of entries, but unfortunately it provides no way for you to know how many entries you should skip.

Let's do something similar to python and allow a negative number to indicate that you want to skip all entries except the last few.  This lets you quickly see what you want.

Note that we also change the printout in ftdump to print the (positive) number of entries actually skipped, since that is helpful to know when you've specified a negative skip count.

Link: http://lkml.kernel.org/r/20190319171206.97107-3-dianders@chromium.org

Signed-off-by: Douglas Anderson <dianders@chromium.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
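As an aside for readers of this log (not part of the patch itself): the negative-skip arithmetic boils down to adding the negative requested count to the total number of buffered entries and clamping at zero, which is what the max(cnt + skip_entries, 0) line in the hunk below does.  A minimal userspace C sketch of that calculation follows; the names effective_skip and total_entries are made up for the example and merely stand in for the kernel helpers trace_total_entries()/trace_total_entries_cpu().

#include <stdio.h>

/* Illustrative stand-in for the kernel-side calculation in kdb_ftdump(). */
static int effective_skip(int total_entries, int skip_entries)
{
	/* A negative skip count means "keep only the last -skip_entries". */
	if (skip_entries < 0) {
		skip_entries += total_entries;
		if (skip_entries < 0)
			skip_entries = 0;
	}
	return skip_entries;
}

int main(void)
{
	printf("%d\n", effective_skip(1000, -25));	/* 975: dump only the last 25 of 1000 */
	printf("%d\n", effective_skip(10, -25));	/* 0: fewer than 25 exist, dump them all */
	printf("%d\n", effective_skip(1000, 100));	/* 100: positive counts pass through unchanged */
	return 0;
}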
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace_kdb.c	45
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 4b666643d69f..6c1ae6b752d1 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,29 +17,25 @@
#include "trace.h"
#include "trace_output.h"
+static struct trace_iterator iter;
+static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
+
static void ftrace_dump_buf(int skip_entries, long cpu_file)
{
- /* use static because iter can be a bit big for the stack */
- static struct trace_iterator iter;
- static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
struct trace_array *tr;
unsigned int old_userobj;
int cnt = 0, cpu;
- trace_init_global_iter(&iter);
- iter.buffer_iter = buffer_iter;
tr = iter.tr;
- for_each_tracing_cpu(cpu) {
- atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
- }
-
old_userobj = tr->trace_flags;
/* don't look at user memory in panic mode */
tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
kdb_printf("Dumping ftrace buffer:\n");
+ if (skip_entries)
+ kdb_printf("(skipping %d entries)\n", skip_entries);
/* reset all but tr, trace, and overruns */
memset(&iter.seq, 0,
@@ -90,10 +86,6 @@ out:
tr->trace_flags = old_userobj;
for_each_tracing_cpu(cpu) {
- atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
- }
-
- for_each_tracing_cpu(cpu) {
if (iter.buffer_iter[cpu]) {
ring_buffer_read_finish(iter.buffer_iter[cpu]);
iter.buffer_iter[cpu] = NULL;
@@ -109,6 +101,8 @@ static int kdb_ftdump(int argc, const char **argv)
int skip_entries = 0;
long cpu_file;
char *cp;
+ int cnt;
+ int cpu;
if (argc > 2)
return KDB_ARGCOUNT;
@@ -129,7 +123,29 @@ static int kdb_ftdump(int argc, const char **argv)
}
kdb_trap_printk++;
+
+ trace_init_global_iter(&iter);
+ iter.buffer_iter = buffer_iter;
+
+ for_each_tracing_cpu(cpu) {
+ atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ }
+
+ /* A negative skip_entries means skip all but the last entries */
+ if (skip_entries < 0) {
+ if (cpu_file == RING_BUFFER_ALL_CPUS)
+ cnt = trace_total_entries(NULL);
+ else
+ cnt = trace_total_entries_cpu(NULL, cpu_file);
+ skip_entries = max(cnt + skip_entries, 0);
+ }
+
ftrace_dump_buf(skip_entries, cpu_file);
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+ }
+
kdb_trap_printk--;
return 0;
@@ -138,7 +154,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]",
- "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
+ "Dump ftrace log; -skip dumps last #entries", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
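A quick usage sketch (illustrative only, not captured from a real session): with this patch applied, 'ftdump -10' at the kdb prompt dumps only the last 10 entries from every CPU's buffer, and 'ftdump -10 2' does the same for CPU 2 only, matching the '[skip_#entries] [cpu]' argument list registered above.  A hypothetical session, with the entry count invented for the example, might look like:

kdb> ftdump -10
Dumping ftrace buffer:
(skipping 16342 entries)
...

The '(skipping N entries)' line is the new printout of the positive number of entries actually skipped.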