author:    Alexei Starovoitov <ast@kernel.org> 2020-07-01 08:22:09 -0700
committer: Alexei Starovoitov <ast@kernel.org> 2020-07-01 08:26:22 -0700
commit:    64f0013c073a0162a6bd31b33def10cb2b2fe6e0
tree:      a29d6395f179a6937340702516a4a2d5bd066e0e /kernel/trace/bpf_trace.c
parent:    bba1dc0b55ac462d24ed1228ad49800c238cd6d7
parent:    c7568114bc56cf3ec0bd9eb117bbe7cad3d30e11
Merge branch 'bpf_get_task_stack'
Song Liu says:

====================
This set introduces a new helper, bpf_get_task_stack(). The primary use
case is to dump all /proc/*/stack to seq_file via bpf_iter__task.

A few different approaches have been explored and compared:

1. A simple wrapper around stack_trace_save_tsk(), as in v1 [1]. This
   approach introduces new syntax, which differs from the existing helper
   bpf_get_stack(), so it is not ideal.

2. Extend get_perf_callchain() to support "task" as an argument. This
   approach reuses most of bpf_get_stack(). However, extending
   get_perf_callchain() requires non-trivial, error-prone changes to
   architecture-specific code.

3. The current (v2) approach, which leverages most of the existing
   bpf_get_stack() and uses stack_trace_save_tsk() to handle the
   architecture-specific logic.

[1] https://lore.kernel.org/netdev/20200623070802.2310018-1-songliubraving@fb.com/

Changes v4 => v5:
1. Rebase and work around a git-am issue. (Alexei)
2. Update the commit log of 4/4. (Yonghong)

Changes v3 => v4:
1. Simplify the selftests with bpf_iter.h. (Yonghong)
2. Add example output to the commit log of 4/4. (Yonghong)

Changes v2 => v3:
1. Rebase on top of bpf-next. (Yonghong)
2. Sanitize get_callchain_entry(). (Peter)
3. Use has_callchain_buf for bpf_get_task_stack. (Andrii)
4. Other small cleanups. (Yonghong, Andrii)

Changes v1 => v2:
1. Reuse most of the bpf_get_stack() logic. (Andrii)
2. Fix the unsigned long vs. u64 mismatch on 32-bit systems. (Yonghong)
3. Add %pB support to bpf_trace_printk(). (Daniel)
4. Fix the buffer size to be in bytes.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
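[ Example: the following sketch is for illustration only and is not part
  of this merge. It shows the intended use case described above: an
  iter/task program that dumps each task's kernel stack through the new
  helper and symbolizes it with %pB, modeled on the selftests in this
  set. The program name, MAX_STACK_TRACE_DEPTH, and the BPF_SEQ_PRINTF
  convenience macro are selftests/libbpf conventions assumed here. ]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

#define MAX_STACK_TRACE_DEPTH	64
#define SIZE_OF_ULONG		(sizeof(unsigned long))

/* scratch buffer; the iterator visits one task at a time */
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	long i, retlen;

	if (!task)
		return 0;

	/* flags == 0: plain kernel addresses; the helper returns the
	 * number of bytes copied, or a negative error.
	 */
	retlen = bpf_get_task_stack(task, entries,
				    MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
	if (retlen < 0)
		return 0;

	BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
		       retlen / SIZE_OF_ULONG);

	/* %pB renders each saved address as symbol+offset, in the style
	 * of /proc/<pid>/stack
	 */
	for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
		if (retlen > i * SIZE_OF_ULONG)
			BPF_SEQ_PRINTF(seq, "[<0>] %pB\n",
				       (void *)entries[i]);
	}
	BPF_SEQ_PRINTF(seq, "\n");
	return 0;
}

[ Pinned to bpffs as a task iterator, reading the pinned file then
  yields /proc/<pid>/stack-style output for every task. ]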
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--  kernel/trace/bpf_trace.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 5d59dda5f661..1d874d8e4384 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -376,7 +376,7 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
 
 /*
  * Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
+ * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
  */
 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 	   u64, arg2, u64, arg3)
@@ -420,6 +420,11 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 				goto fmt_str;
 			}
 
+			if (fmt[i + 1] == 'B') {
+				i++;
+				goto fmt_next;
+			}
+
 			/* disallow any further format extensions */
 			if (fmt[i + 1] != 0 &&
 			    !isspace(fmt[i + 1]) &&
@@ -636,7 +641,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 		if (fmt[i] == 'p') {
 			if (fmt[i + 1] == 0 ||
 			    fmt[i + 1] == 'K' ||
-			    fmt[i + 1] == 'x') {
+			    fmt[i + 1] == 'x' ||
+			    fmt[i + 1] == 'B') {
 				/* just kernel pointers */
 				params[fmt_cnt] = args[fmt_cnt];
 				fmt_cnt++;
@@ -1137,6 +1143,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_ringbuf_query_proto;
 	case BPF_FUNC_jiffies64:
 		return &bpf_jiffies64_proto;
+	case BPF_FUNC_get_task_stack:
+		return &bpf_get_task_stack_proto;
 	default:
 		return NULL;
 	}
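[ Example: again for illustration only, a minimal kprobe program that
  exercises the %pB specifier the first two hunks allow in
  bpf_trace_printk(). The attach point (schedule) and the program name
  are arbitrary assumptions, not part of this commit; the usual libbpf
  headers and a target arch define for bpf_tracing.h are assumed. ]

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("kprobe/schedule")
int trace_entry(struct pt_regs *ctx)
{
	/* PT_REGS_IP() is the probed instruction pointer; %pB makes
	 * bpf_trace_printk() emit it as symbol+offset.
	 */
	char fmt[] = "probed at %pB\n";

	bpf_trace_printk(fmt, sizeof(fmt), PT_REGS_IP(ctx));
	return 0;
}

[ The symbolized address shows up in
  /sys/kernel/debug/tracing/trace_pipe. ]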