author     David S. Miller <davem@davemloft.net>  2019-05-07 17:22:09 -0700
committer  David S. Miller <davem@davemloft.net>  2019-05-07 17:22:09 -0700
commit     a9e41a529681b38087c91ebc0bb91e12f510ca2d (patch)
tree       544bf5861113e2f7e676a333a1267d887c800870 /arch
parent     23bfaa594002f4bba085e0a1ae3c9847b988d816 (diff)
parent     a3147770bea76c8dbad73eca3a24c2118da5e719 (diff)
download   linux-stable-a9e41a529681b38087c91ebc0bb91e12f510ca2d.tar.gz
           linux-stable-a9e41a529681b38087c91ebc0bb91e12f510ca2d.tar.bz2
           linux-stable-a9e41a529681b38087c91ebc0bb91e12f510ca2d.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor conflict with the DSA legacy code removal.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c  236
1 file changed, 172 insertions, 64 deletions
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 0d9cdffce6ac..b29e82f190c7 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -117,6 +117,8 @@ static bool is_simm32(s64 value)
#define IA32_JLE 0x7E
#define IA32_JG 0x7F
+#define COND_JMP_OPCODE_INVALID (0xFF)
+
/*
* Map eBPF registers to IA32 32bit registers or stack scratch space.
*
@@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
STACK_VAR(dst_hi));
}
- /* xor ecx,ecx */
- EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* sub dreg_lo,ecx */
- EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
- /* mov dreg_lo,ecx */
- EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
-
- /* xor ecx,ecx */
- EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* sbb dreg_hi,ecx */
- EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
- /* mov dreg_hi,ecx */
- EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
+ /* neg dreg_lo */
+ EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
+ /* adc dreg_hi,0x0 */
+ EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
+ /* neg dreg_hi */
+ EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
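
The hunk above replaces the old xor/sub/mov plus xor/sbb/mov sequence with a three-instruction neg/adc/neg pattern for negating the 64-bit value held in dreg_lo:dreg_hi. A minimal userspace sketch of the same arithmetic, for illustration only (the helper name neg64_split and the test value are not part of the kernel code):

#include <assert.h>
#include <stdint.h>

/* Model of the emitted neg/adc/neg sequence on a 64-bit value split
 * into 32-bit halves: negating the low half leaves CF = 1 unless the
 * low half was zero, the carry is folded into the high half, and the
 * high half is then negated.
 */
static void neg64_split(uint32_t *lo, uint32_t *hi)
{
	uint32_t carry = (*lo != 0);	/* CF left by "neg dreg_lo" */

	*lo = 0U - *lo;			/* neg dreg_lo */
	*hi += carry;			/* adc dreg_hi, 0 */
	*hi = 0U - *hi;			/* neg dreg_hi */
}

int main(void)
{
	int64_t v = -123456789012345LL;
	uint32_t lo = (uint32_t)v;
	uint32_t hi = (uint32_t)((uint64_t)v >> 32);

	neg64_split(&lo, &hi);
	assert((int64_t)(((uint64_t)hi << 32) | lo) == -v);
	return 0;
}
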
@@ -1613,6 +1608,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
*pprog = prog;
}
+static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
+{
+ u8 jmp_cond;
+
+ /* Convert BPF opcode to x86 */
+ switch (op) {
+ case BPF_JEQ:
+ jmp_cond = IA32_JE;
+ break;
+ case BPF_JSET:
+ case BPF_JNE:
+ jmp_cond = IA32_JNE;
+ break;
+ case BPF_JGT:
+ /* GT is unsigned '>', JA in x86 */
+ jmp_cond = IA32_JA;
+ break;
+ case BPF_JLT:
+ /* LT is unsigned '<', JB in x86 */
+ jmp_cond = IA32_JB;
+ break;
+ case BPF_JGE:
+ /* GE is unsigned '>=', JAE in x86 */
+ jmp_cond = IA32_JAE;
+ break;
+ case BPF_JLE:
+ /* LE is unsigned '<=', JBE in x86 */
+ jmp_cond = IA32_JBE;
+ break;
+ case BPF_JSGT:
+ if (!is_cmp_lo)
+ /* Signed '>', GT in x86 */
+ jmp_cond = IA32_JG;
+ else
+ /* GT is unsigned '>', JA in x86 */
+ jmp_cond = IA32_JA;
+ break;
+ case BPF_JSLT:
+ if (!is_cmp_lo)
+ /* Signed '<', LT in x86 */
+ jmp_cond = IA32_JL;
+ else
+ /* LT is unsigned '<', JB in x86 */
+ jmp_cond = IA32_JB;
+ break;
+ case BPF_JSGE:
+ if (!is_cmp_lo)
+ /* Signed '>=', GE in x86 */
+ jmp_cond = IA32_JGE;
+ else
+ /* GE is unsigned '>=', JAE in x86 */
+ jmp_cond = IA32_JAE;
+ break;
+ case BPF_JSLE:
+ if (!is_cmp_lo)
+ /* Signed '<=', LE in x86 */
+ jmp_cond = IA32_JLE;
+ else
+ /* LE is unsigned '<=', JBE in x86 */
+ jmp_cond = IA32_JBE;
+ break;
+ default: /* to silence GCC warning */
+ jmp_cond = COND_JMP_OPCODE_INVALID;
+ break;
+ }
+
+ return jmp_cond;
+}
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -2069,10 +2133,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
- case BPF_JMP | BPF_JSGT | BPF_X:
- case BPF_JMP | BPF_JSLE | BPF_X:
- case BPF_JMP | BPF_JSLT | BPF_X:
- case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
@@ -2118,6 +2178,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X: {
+ u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 sreg_lo = sstk ? IA32_ECX : src_lo;
+ u8 sreg_hi = sstk ? IA32_EBX : src_hi;
+
+ if (dstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+ STACK_VAR(dst_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
+ }
+
+ if (sstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
+ STACK_VAR(src_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
+ }
+
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 10);
+ /* cmp dreg_lo,sreg_lo */
+ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+ goto emit_cond_jmp_signed;
+ }
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
@@ -2194,10 +2288,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
- case BPF_JMP | BPF_JSGT | BPF_K:
- case BPF_JMP | BPF_JSLE | BPF_K:
- case BPF_JMP | BPF_JSLT | BPF_K:
- case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
@@ -2238,50 +2328,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
-emit_cond_jmp: /* Convert BPF opcode to x86 */
- switch (BPF_OP(code)) {
- case BPF_JEQ:
- jmp_cond = IA32_JE;
- break;
- case BPF_JSET:
- case BPF_JNE:
- jmp_cond = IA32_JNE;
- break;
- case BPF_JGT:
- /* GT is unsigned '>', JA in x86 */
- jmp_cond = IA32_JA;
- break;
- case BPF_JLT:
- /* LT is unsigned '<', JB in x86 */
- jmp_cond = IA32_JB;
- break;
- case BPF_JGE:
- /* GE is unsigned '>=', JAE in x86 */
- jmp_cond = IA32_JAE;
- break;
- case BPF_JLE:
- /* LE is unsigned '<=', JBE in x86 */
- jmp_cond = IA32_JBE;
- break;
- case BPF_JSGT:
- /* Signed '>', GT in x86 */
- jmp_cond = IA32_JG;
- break;
- case BPF_JSLT:
- /* Signed '<', LT in x86 */
- jmp_cond = IA32_JL;
- break;
- case BPF_JSGE:
- /* Signed '>=', GE in x86 */
- jmp_cond = IA32_JGE;
- break;
- case BPF_JSLE:
- /* Signed '<=', LE in x86 */
- jmp_cond = IA32_JLE;
- break;
- default: /* to silence GCC warning */
+emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
- }
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
EMIT2(jmp_cond, jmp_offset);
@@ -2291,7 +2340,66 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
+ break;
+ }
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K: {
+ u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 sreg_lo = IA32_ECX;
+ u8 sreg_hi = IA32_EBX;
+ u32 hi;
+ if (dstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+ STACK_VAR(dst_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
+ }
+
+ /* mov ecx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 10);
+ /* cmp dreg_lo,sreg_lo */
+ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+
+ /*
+ * For simplicity of branch offset computation,
+ * let's use fixed jump coding here.
+ */
+emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */
+ jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
+ return -EFAULT;
+ jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
+ if (is_simm32(jmp_offset)) {
+ EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ } else {
+ pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
+ EMIT2(0xEB, 6);
+
+ /* Check the condition for high 32-bit comparison */
+ jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
+ return -EFAULT;
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+ if (is_simm32(jmp_offset)) {
+ EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ } else {
+ pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
break;
}
case BPF_JMP | BPF_JA:
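
For reference, the fixed-size encodings that the constants 10, "+ 8" and 6 in the signed-jump hunk above rely on can be spelled out as follows. This is a hedged sketch; the enum names are illustrative, and the sizes are the standard x86 lengths of the instructions emitted by the hunk:

#include <assert.h>

/* x86 instruction lengths assumed by the fixed jump coding above. */
enum {
	CMP_RR    = 2,	/* 0x39 /r         : cmp reg,reg */
	JCC_REL32 = 6,	/* 0x0F 0x8x imm32 : near jcc    */
	JMP_REL8  = 2,	/* 0xEB imm8       : short jmp   */
};

int main(void)
{
	/* "EMIT2(IA32_JNE, 10)": when the high words differ, skip the
	 * low-word compare, its near jcc and the short jmp, landing on
	 * the high-word jcc.
	 */
	assert(CMP_RR + JCC_REL32 + JMP_REL8 == 10);

	/* "jmp_offset = addrs[i + insn->off] - addrs[i] + 8": the
	 * low-word jcc ends 8 bytes before addrs[i] (the end of the
	 * emitted blob), so its rel32 needs that correction.
	 */
	assert(JMP_REL8 + JCC_REL32 == 8);

	/* "EMIT2(0xEB, 6)": hop over the 6-byte high-word jcc to reach
	 * the fall-through path.
	 */
	assert(JCC_REL32 == 6);
	return 0;
}
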