author		Daniel Borkmann <daniel@iogearbox.net>	2022-09-09 16:15:11 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2022-09-09 16:24:51 +0200
commit		665f5d3577ef43e929d59cf39683037887c351bf
tree		50c5fe3c5eb2ca005568d84e82c9e45fe7a8dac6
parent		b239da34203f49c40b5d656220c39647c3ff0b3c
libbpf: Remove gcc support for bpf_tail_call_static for now
This reverts commit 14e5ce79943a ("libbpf: Add GCC support for bpf_tail_call_static").

The reason is that GCC invented its own BPF asm syntax which does not conform
with the LLVM one, and going forward this would be more painful to maintain
here and in other areas of the library. Thus remove it; the ask to the GCC
folks is to align with the LLVM syntax and use the exact same one.

Fixes: 14e5ce79943a ("libbpf: Add GCC support for bpf_tail_call_static")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: James Hilliard <james.hilliard1@gmail.com>
Cc: Jose E. Marchesi <jose.marchesi@oracle.com>
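For illustration only (not part of this patch): a minimal sketch of how
bpf_tail_call_static() is used from a BPF program. The map, section, and
program names below are hypothetical, and the sketch assumes a clang/LLVM
BPF build:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical prog array; slots are populated with program fds
 * from userspace via bpf_map_update_elem().
 */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int xdp_entry(struct xdp_md *ctx)
{
	/* Slot 0 is a compile-time constant, so the x86-64 JIT can patch
	 * the tail call into a direct jump instead of a retpoline.
	 */
	bpf_tail_call_static(ctx, &jmp_table, 0);

	/* Only reached if the tail call fails (e.g. empty slot). */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";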
Diffstat (limited to 'tools/lib/bpf/bpf_helpers.h')
-rw-r--r--	tools/lib/bpf/bpf_helpers.h | 19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 867b734839dd..7349b16b8e2f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -131,7 +131,7 @@
 /*
  * Helper function to perform a tail call with a constant/immediate map slot.
  */
-#if (!defined(__clang__) || __clang_major__ >= 8) && defined(__bpf__)
+#if __clang_major__ >= 8 && defined(__bpf__)
 static __always_inline void
 bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 {
@@ -139,8 +139,8 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 		__bpf_unreachable();
 
 	/*
-	 * Provide a hard guarantee that the compiler won't optimize setting r2
-	 * (map pointer) and r3 (constant map index) from _different paths_ ending
+	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
+	 * pointer) and r3 (constant map index) from _different paths_ ending
 	 * up at the _same_ call insn as otherwise we won't be able to use the
 	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
 	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
@@ -148,19 +148,12 @@ bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
 	 *
 	 * Note on clobber list: we need to stay in-line with BPF calling
 	 * convention, so even if we don't end up using r0, r4, r5, we need
-	 * to mark them as clobber so that the compiler doesn't end up using
-	 * them before / after the call.
+	 * to mark them as clobber so that LLVM doesn't end up using them
+	 * before / after the call.
 	 */
-	asm volatile(
-#ifdef __clang__
-		     "r1 = %[ctx]\n\t"
+	asm volatile("r1 = %[ctx]\n\t"
 		     "r2 = %[map]\n\t"
 		     "r3 = %[slot]\n\t"
-#else
-		     "mov %%r1,%[ctx]\n\t"
-		     "mov %%r2,%[map]\n\t"
-		     "mov %%r3,%[slot]\n\t"
-#endif
 		     "call 12"
 		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
 		     : "r0", "r1", "r2", "r3", "r4", "r5");