| author | Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> | 2019-11-08 19:20:22 +0300 |
|---|---|---|
| committer | Vineet Gupta <vgupta@synopsys.com> | 2019-11-08 16:14:32 -0800 |
| commit | f091d5a426447cc427680bdd3adc7773aa2867df (patch) | |
| tree | e255dd3806463334c58d03aff06dee6e3b3268c3 /arch/arc/include | |
| parent | 2f4ecf68a048de44d72157d637bf9cbbbdb357b0 (diff) | |
| download | linux-f091d5a426447cc427680bdd3adc7773aa2867df.tar.gz, linux-f091d5a426447cc427680bdd3adc7773aa2867df.tar.bz2, linux-f091d5a426447cc427680bdd3adc7773aa2867df.zip | |
ARC: ARCv2: jump label: implement jump label patching
Implement jump label patching for ARC. Jump labels provide
an interface to generate dynamic branches using
self-modifying code.
This allows us to implement conditional branches where
changing the branch direction is expensive but branch selection
is essentially free.
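
A minimal sketch of the consumer side, using the generic static-key API from <linux/jump_label.h> (the key name and slow-path helper below are made up for illustration): flipping the key is the expensive operation that rewrites every patched site, while testing it on the hot path costs only the NOP or branch this patch emits.

```c
#include <linux/jump_label.h>

/* Hypothetical feature flag; defaults to false, so the hot path
 * compiles down to the NOP emitted by arch_static_branch(). */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

extern void do_feature_work(void);	/* hypothetical slow-path helper */

void hot_path(void)
{
	/* Patched as a NOP while disabled, as a branch once enabled. */
	if (static_branch_unlikely(&my_feature_key))
		do_feature_work();
}

void enable_feature(void)
{
	/* Expensive: rewrites the instruction at every call site. */
	static_branch_enable(&my_feature_key);
}
```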
This implementation uses 32-bit NOP and BRANCH instructions,
which are forced to be 4-byte aligned to guarantee that they don't
cross an L1 cache line boundary and can therefore be updated atomically.
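
The alignment argument is plain arithmetic: a 4-byte instruction at a 4-byte-aligned address occupies bytes [addr, addr+3], and since any L1 line size is a multiple of 4, both ends land in the same line. A small user-space sketch (the 64-byte line size is an assumption; ARC L1 line size is configurable) checks this exhaustively:

```c
#include <assert.h>
#include <stdint.h>

/* True if a 4-byte word starting at addr straddles a cache line. */
static int crosses_line(uint32_t addr, uint32_t line_size)
{
	return (addr / line_size) != ((addr + 3) / line_size);
}

int main(void)
{
	/* Assumed 64-byte L1 lines; any multiple of 4 behaves the same. */
	for (uint32_t addr = 0; addr < 4096; addr += 4)
		assert(!crosses_line(addr, 64));
	return 0;
}
```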
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/include')
-rw-r--r-- | arch/arc/include/asm/cache.h | 2
-rw-r--r-- | arch/arc/include/asm/jump_label.h | 72

2 files changed, 74 insertions, 0 deletions
```diff
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 918804c7c1a4..d8ece4292388 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -25,6 +25,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/build_bug.h>
+
 /* Uncached access macros */
 #define arc_read_uncached_32(ptr)	\
 ({					\
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
new file mode 100644
index 000000000000..9d9618079739
--- /dev/null
+++ b/arch/arc/include/asm/jump_label.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARC_JUMP_LABEL_H
+#define _ASM_ARC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+/*
+ * NOTE about '.balign 4':
+ *
+ * To make atomic update of the patched instruction possible we need to
+ * guarantee that the instruction doesn't cross an L1 cache line boundary.
+ *
+ * Today we simply align the instruction to be patched to 4 bytes with the
+ * ".balign 4" directive, in which case the patched instruction is aligned
+ * with one 16-bit NOP_S where required.
+ * However, aligning to 4 is stricter than actually required: it is enough
+ * that our 32-bit instruction doesn't cross an L1 cache line / L1 I$ fetch
+ * block boundary, which could be achieved with the ".bundle_align_mode"
+ * assembler directive. That would save us from adding useless NOP_S
+ * padding in most cases.
+ *
+ * TODO: switch to the ".bundle_align_mode" directive once it is supported
+ * by the ARC toolchain.
+ */
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
+		 "1:							\n"
+		 "nop							\n"
+		 ".pushsection __jump_table, \"aw\"			\n"
+		 ".word 1b, %l[l_yes], %c0				\n"
+		 ".popsection						\n"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
+		 "1:							\n"
+		 "b %l[l_yes]						\n"
+		 ".pushsection __jump_table, \"aw\"			\n"
+		 ".word 1b, %l[l_yes], %c0				\n"
+		 ".popsection						\n"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
```
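
The header above only emits the patch sites and their __jump_table entries; the code that actually rewrites them lives in arch/arc/kernel/jump_label.c, which is outside this diffstat. As a rough sketch of what that side must do, assuming a branch-encoding helper and a 32-bit NOP constant (arc_gen_branch_insn() and ARC_NOP32 are illustrative names, not code from this patch):

```c
#include <linux/jump_label.h>
#include <asm/cacheflush.h>

/* Sketch only: the generic jump label core hands us one __jump_table
 * entry plus the desired direction; we overwrite the 4-byte slot that
 * arch_static_branch()/arch_static_branch_jump() emitted. */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	u32 *addr = (u32 *)(unsigned long)entry->code;
	u32 insn;

	if (type == JUMP_LABEL_JMP)
		/* Encode "b <target>" relative to the patch site. */
		insn = arc_gen_branch_insn(entry->code, entry->target);
	else
		insn = ARC_NOP32;	/* assumed 32-bit NOP encoding */

	/* '.balign 4' guarantees the 4-byte slot sits inside one L1
	 * line, so this single store takes effect atomically. */
	WRITE_ONCE(*addr, insn);
	flush_icache_range(entry->code, entry->code + JUMP_LABEL_NOP_SIZE);
}
```

The three u32 fields of struct jump_entry (code, target, key) are exactly the three words that the ".word 1b, %l[l_yes], %c0" line stores for each patch site.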