author | Steven Rostedt (Google) <rostedt@goodmis.org> | 2023-01-24 15:22:40 -0500
committer | Steven Rostedt (Google) <rostedt@goodmis.org> | 2023-01-25 10:31:24 -0500
commit | dc513fd5321d4c15ac1f820c224b2e5220b6e14f
tree | 1974b68b0b73f4d6f4766ff980944e8e41012de8 /include/trace
parent | 92a22cea4c847b518af646a809cd662d55f9d8ac
bpf/tracing: Use stage6 of tracing to not duplicate macros
The bpf events are created by the same macro magic as the tracefs trace
events, but the bpf hook has its own code that duplicates many of the
tracefs macros. This is an issue because it misses bug fixes as well as
any new enhancements that come with the other trace macros.

As the trace macros have now been split out into their own staging files,
have bpf take advantage of this and use the tracefs stage 6 macros that
the "fast assign" portion of the trace event macro uses.
Link: https://lkml.kernel.org/r/20230124202515.873075730@goodmis.org
Link: https://lore.kernel.org/lkml/1671181385-5719-1-git-send-email-quic_linyyuan@quicinc.com/
Cc: bpf@vger.kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Reported-by: Linyu Yuan <quic_linyyuan@quicinc.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Diffstat (limited to 'include/trace')
-rw-r--r-- | include/trace/bpf_probe.h | 45
1 file changed, 1 insertion(+), 44 deletions(-)
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 155c495b89ea..1f7fc1fc590c 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -4,50 +4,7 @@
 
 #ifdef CONFIG_BPF_EVENTS
 
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field)	\
-		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field)	\
-		((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) ((char *)__get_dynamic_array(field))
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_cpumask
-#define __get_cpumask(field) (char *)__get_dynamic_array(field)
-
-#undef __get_sockaddr
-#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
-
-#undef __get_rel_dynamic_array
-#define __get_rel_dynamic_array(field)	\
-		((void *)(&__entry->__rel_loc_##field) +	\
-		 sizeof(__entry->__rel_loc_##field) +	\
-		 (__entry->__rel_loc_##field & 0xffff))
-
-#undef __get_rel_dynamic_array_len
-#define __get_rel_dynamic_array_len(field)	\
-		((__entry->__rel_loc_##field >> 16) & 0xffff)
-
-#undef __get_rel_str
-#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
-
-#undef __get_rel_bitmask
-#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_cpumask
-#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
-
-#undef __get_rel_sockaddr
-#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+#include "stages/stage6_event_callback.h"
 
 #undef __perf_count
 #define __perf_count(c)	(c)
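For context on what the removed macros (and their shared stage 6 replacements) decode: a dynamic-array field in a trace event entry is described by a 32-bit __data_loc word that packs the data's byte offset from the start of the entry in the low 16 bits and its length in the high 16 bits. Below is a minimal user-space sketch of that encoding; the struct and helper names are made up for illustration and this is not kernel code.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical stand-in for a trace event entry with one dynamic field. */
struct fake_entry {
	uint32_t __data_loc_msg;	/* packed offset (low 16 bits) and length (high 16 bits) */
	char buf[64];			/* where the dynamic data is stored */
};

/* Equivalent of __get_dynamic_array(): low 16 bits give the offset from the entry. */
static void *get_dynamic_array(struct fake_entry *entry, uint32_t loc)
{
	return (char *)entry + (loc & 0xffff);
}

/* Equivalent of __get_dynamic_array_len(): high 16 bits give the length. */
static unsigned int get_dynamic_array_len(uint32_t loc)
{
	return (loc >> 16) & 0xffff;
}

int main(void)
{
	struct fake_entry e;
	const char *msg = "hello";
	uint16_t off = offsetof(struct fake_entry, buf);
	uint16_t len = (uint16_t)(strlen(msg) + 1);

	/* Pack length in the high half and offset in the low half, matching
	 * what the decode macros in the diff above expect. */
	e.__data_loc_msg = ((uint32_t)len << 16) | off;
	memcpy((char *)&e + off, msg, len);

	printf("str=%s len=%u\n",
	       (char *)get_dynamic_array(&e, e.__data_loc_msg),
	       get_dynamic_array_len(e.__data_loc_msg));
	return 0;
}
```

The __rel_loc variants removed above work the same way, except the offset is taken relative to the location field itself (just past its own storage) rather than from the start of the entry.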