This is the mail archive of the
elfutils-devel@sourceware.org
mailing list for the elfutils project.
[PATCH] Fix bpf disassembler for _FORTIFY_SOURCE
- From: Richard Henderson <rth at redhat dot com>
- To: elfutils-devel at lists dot fedorahosted dot org
- Date: Wed, 10 Aug 2016 12:41:21 +0530
- Subject: [PATCH] Fix bpf disassembler for _FORTIFY_SOURCE
It's illegal to skip positional operands for printf: POSIX requires that
when numbered argument conversions (%n$) are used, every argument from 1
through the highest number referenced must be consumed. Rearrange the
printing of the instructions to use exactly the operands required.
Also, fix printing of mod operations: s/%/%%/ in the print format.
Also, fix printing of endian operations: remove extra spaces.
---
libcpu/bpf_disasm.c | 539 +++++++++++++++++++++++++------------
tests/testfile-bpf-dis1.expect.bz2 | Bin 1497 -> 1467 bytes
2 files changed, 366 insertions(+), 173 deletions(-)
diff --git a/libcpu/bpf_disasm.c b/libcpu/bpf_disasm.c
index 6301dcc..153dba9 100644
--- a/libcpu/bpf_disasm.c
+++ b/libcpu/bpf_disasm.c
@@ -52,136 +52,24 @@ static const char class_string[8][8] = {
[BPF_ALU64] = "alu64",
};
-/* Dest = 1$, Src = 2$, Imm = 3$, Off = 4$, Branch = 5$. */
-
-#define DST "r%1$d"
-#define DSTU "(u32)" DST
-#define DSTS "(s64)" DST
-
-#define SRC "r%2$d"
-#define SRCU "(u32)" SRC
-#define SRCS "(s64)" SRC
-
-#define IMMS "%3$d"
-#define IMMX "%3$#x"
-#define OFF "%4$+d"
-#define JMP "%5$#x"
-
-#define A32(O, S) DST " = " DSTU " " #O " " S
-#define A64(O, S) DST " " #O "= " S
-#define J64(D, O, S) "if " D " " #O " " S " goto " JMP
-#define LOAD(T) DST " = *(" #T " *)(" SRC OFF ")"
-#define STORE(T, S) "*(" #T " *)(" DST OFF ") = " S
-#define XADD(T, S) "lock *(" #T " *)(" DST OFF ") += " S
-#define LDSKB(T, S) "r0 = *(" #T " *)skb[" S "]"
-/* 8 character field between opcode and arguments. */
-static const char * const code_fmts[256] = {
- [BPF_ALU | BPF_ADD | BPF_K] = A32(+, IMMS),
- [BPF_ALU | BPF_SUB | BPF_K] = A32(-, IMMS),
- [BPF_ALU | BPF_MUL | BPF_K] = A32(*, IMMS),
- [BPF_ALU | BPF_DIV | BPF_K] = A32(/, IMMS),
- [BPF_ALU | BPF_OR | BPF_K] = A32(|, IMMX),
- [BPF_ALU | BPF_AND | BPF_K] = A32(&, IMMX),
- [BPF_ALU | BPF_LSH | BPF_K] = A32(<<, IMMS),
- [BPF_ALU | BPF_RSH | BPF_K] = A32(>>, IMMS),
- [BPF_ALU | BPF_MOD | BPF_K] = A32(%, IMMS),
- [BPF_ALU | BPF_XOR | BPF_K] = A32(^, IMMX),
- [BPF_ALU | BPF_MOV | BPF_K] = DST " = " IMMX,
- [BPF_ALU | BPF_ARSH | BPF_K] = DST " = (u32)((s32)" DST " >> " IMMS ")",
-
- [BPF_ALU | BPF_ADD | BPF_X] = A32(+, SRCU),
- [BPF_ALU | BPF_SUB | BPF_X] = A32(-, SRCU),
- [BPF_ALU | BPF_MUL | BPF_X] = A32(*, SRCU),
- [BPF_ALU | BPF_DIV | BPF_X] = A32(/, SRCU),
- [BPF_ALU | BPF_OR | BPF_X] = A32(|, SRCU),
- [BPF_ALU | BPF_AND | BPF_X] = A32(&, SRCU),
- [BPF_ALU | BPF_LSH | BPF_X] = A32(<<, SRCU),
- [BPF_ALU | BPF_RSH | BPF_X] = A32(>>, SRCU),
- [BPF_ALU | BPF_MOD | BPF_X] = A32(%, SRCU),
- [BPF_ALU | BPF_XOR | BPF_X] = A32(^, SRCU),
- [BPF_ALU | BPF_MOV | BPF_X] = DST " = " SRCU,
- [BPF_ALU | BPF_ARSH | BPF_X] = DST " = (u32)((s32)" DST " >> " SRC ")",
-
- [BPF_ALU64 | BPF_ADD | BPF_K] = A64(+, IMMS),
- [BPF_ALU64 | BPF_SUB | BPF_K] = A64(-, IMMS),
- [BPF_ALU64 | BPF_MUL | BPF_K] = A64(*, IMMS),
- [BPF_ALU64 | BPF_DIV | BPF_K] = A64(/, IMMS),
- [BPF_ALU64 | BPF_OR | BPF_K] = A64(|, IMMS),
- [BPF_ALU64 | BPF_AND | BPF_K] = A64(&, IMMS),
- [BPF_ALU64 | BPF_LSH | BPF_K] = A64(<<, IMMS),
- [BPF_ALU64 | BPF_RSH | BPF_K] = A64(>>, IMMS),
- [BPF_ALU64 | BPF_MOD | BPF_K] = A64(%, IMMS),
- [BPF_ALU64 | BPF_XOR | BPF_K] = A64(^, IMMS),
- [BPF_ALU64 | BPF_MOV | BPF_K] = DST " = " IMMS,
- [BPF_ALU64 | BPF_ARSH | BPF_K] = DST " = (s64)" DST " >> " IMMS,
-
- [BPF_ALU64 | BPF_ADD | BPF_X] = A64(+, SRC),
- [BPF_ALU64 | BPF_SUB | BPF_X] = A64(-, SRC),
- [BPF_ALU64 | BPF_MUL | BPF_X] = A64(*, SRC),
- [BPF_ALU64 | BPF_DIV | BPF_X] = A64(/, SRC),
- [BPF_ALU64 | BPF_OR | BPF_X] = A64(|, SRC),
- [BPF_ALU64 | BPF_AND | BPF_X] = A64(&, SRC),
- [BPF_ALU64 | BPF_LSH | BPF_X] = A64(<<, SRC),
- [BPF_ALU64 | BPF_RSH | BPF_X] = A64(>>, SRC),
- [BPF_ALU64 | BPF_MOD | BPF_X] = A64(%, SRC),
- [BPF_ALU64 | BPF_XOR | BPF_X] = A64(^, SRC),
- [BPF_ALU64 | BPF_MOV | BPF_X] = DST " = " SRC,
- [BPF_ALU64 | BPF_ARSH | BPF_X] = DST " = (s64)" DST " >> " SRC,
-
- [BPF_ALU | BPF_NEG] = DST " = (u32)-" DST,
- [BPF_ALU64 | BPF_NEG] = DST " = -" DST,
-
- /* The imm field contains {16,32,64}. */
- [BPF_ALU | BPF_END | BPF_TO_LE] = DST " = le%3$-6d(" DST ")",
- [BPF_ALU | BPF_END | BPF_TO_BE] = DST " = be%3$-6d(" DST ")",
-
- [BPF_JMP | BPF_JEQ | BPF_K] = J64(DST, ==, IMMS),
- [BPF_JMP | BPF_JGT | BPF_K] = J64(DST, >, IMMS),
- [BPF_JMP | BPF_JGE | BPF_K] = J64(DST, >=, IMMS),
- [BPF_JMP | BPF_JSET | BPF_K] = J64(DST, &, IMMS),
- [BPF_JMP | BPF_JNE | BPF_K] = J64(DST, !=, IMMS),
- [BPF_JMP | BPF_JSGT | BPF_K] = J64(DSTS, >, IMMS),
- [BPF_JMP | BPF_JSGE | BPF_K] = J64(DSTS, >=, IMMS),
-
- [BPF_JMP | BPF_JEQ | BPF_X] = J64(DST, ==, SRC),
- [BPF_JMP | BPF_JGT | BPF_X] = J64(DST, >, SRC),
- [BPF_JMP | BPF_JGE | BPF_X] = J64(DST, >=, SRC),
- [BPF_JMP | BPF_JSET | BPF_X] = J64(DST, &, SRC),
- [BPF_JMP | BPF_JNE | BPF_X] = J64(DST, !=, SRC),
- [BPF_JMP | BPF_JSGT | BPF_X] = J64(DSTS, >, SRCS),
- [BPF_JMP | BPF_JSGE | BPF_X] = J64(DSTS, >=, SRCS),
-
- [BPF_JMP | BPF_JA] = "goto " JMP,
- [BPF_JMP | BPF_CALL] = "call " IMMS,
- [BPF_JMP | BPF_EXIT] = "exit",
-
- [BPF_LDX | BPF_MEM | BPF_B] = LOAD(u8),
- [BPF_LDX | BPF_MEM | BPF_H] = LOAD(u16),
- [BPF_LDX | BPF_MEM | BPF_W] = LOAD(u32),
- [BPF_LDX | BPF_MEM | BPF_DW] = LOAD(u64),
-
- [BPF_STX | BPF_MEM | BPF_B] = STORE(u8, SRC),
- [BPF_STX | BPF_MEM | BPF_H] = STORE(u16, SRC),
- [BPF_STX | BPF_MEM | BPF_W] = STORE(u32, SRC),
- [BPF_STX | BPF_MEM | BPF_DW] = STORE(u64, SRC),
-
- [BPF_STX | BPF_XADD | BPF_W] = XADD(u32, SRC),
- [BPF_STX | BPF_XADD | BPF_DW] = XADD(u64, SRC),
-
- [BPF_ST | BPF_MEM | BPF_B] = STORE(u8, IMMS),
- [BPF_ST | BPF_MEM | BPF_H] = STORE(u16, IMMS),
- [BPF_ST | BPF_MEM | BPF_W] = STORE(u32, IMMS),
- [BPF_ST | BPF_MEM | BPF_DW] = STORE(u64, IMMS),
-
- [BPF_LD | BPF_ABS | BPF_B] = LDSKB(u8, IMMS),
- [BPF_LD | BPF_ABS | BPF_H] = LDSKB(u16, IMMS),
- [BPF_LD | BPF_ABS | BPF_W] = LDSKB(u32, IMMS),
-
- [BPF_LD | BPF_IND | BPF_B] = LDSKB(u8, SRC "+" IMMS),
- [BPF_LD | BPF_IND | BPF_H] = LDSKB(u16, SRC "+" IMMS),
- [BPF_LD | BPF_IND | BPF_W] = LDSKB(u32, SRC "+" IMMS),
-};
+#define REG(N) "r%" #N "$d"
+#define REGU(N) "(u32)" REG(N)
+#define REGS(N) "(s64)" REG(N)
+
+#define IMMS(N) "%" #N "$d"
+#define IMMX(N) "%" #N "$#x"
+
+#define OFF(N) "%" #N "$+d"
+#define JMP(N) "%" #N "$#x"
+
+#define A32(O, S) REG(1) " = " REGU(1) " " #O " " S
+#define A64(O, S) REG(1) " " #O "= " S
+#define J64(D, O, S) "if " D " " #O " " S " goto " JMP(3)
+#define LOAD(T) REG(1) " = *(" #T " *)(" REG(2) OFF(3) ")"
+#define STORE(T, S) "*(" #T " *)(" REG(1) OFF(3) ") = " S
+#define XADD(T, S) "lock *(" #T " *)(" REG(1) OFF(3) ") += " S
+#define LDSKB(T, S) "r0 = *(" #T " *)skb[" S "]"
static void
bswap_bpf_insn (struct bpf_insn *p)
@@ -222,59 +110,364 @@ bpf_disasm (Ebl *ebl, const uint8_t **startp, const uint8_t *end,
memcpy(&i, start, sizeof(struct bpf_insn));
if (need_bswap)
bswap_bpf_insn (&i);
+
start += sizeof(struct bpf_insn);
addr += sizeof(struct bpf_insn);
-
- /* ??? We really should pass in CTX, so that we can detect
- wrong endianness and do some swapping. */
+ jmp = addr + i.off * sizeof(struct bpf_insn);
code = i.code;
- code_fmt = code_fmts[code];
-
- if (code == (BPF_LD | BPF_IMM | BPF_DW))
+ switch (code)
{
- struct bpf_insn i2;
- uint64_t imm64;
-
- if (start + sizeof(struct bpf_insn) > end)
- {
- start -= sizeof(struct bpf_insn);
- *startp = start;
- goto done;
- }
- memcpy(&i2, start, sizeof(struct bpf_insn));
- if (need_bswap)
- bswap_bpf_insn (&i2);
- start += sizeof(struct bpf_insn);
- addr += sizeof(struct bpf_insn);
-
- imm64 = (uint32_t)i.imm | ((uint64_t)i2.imm << 32);
- switch (i.src_reg)
- {
- case 0:
- code_fmt = DST " = %2$#" PRIx64;
- break;
- case BPF_PSEUDO_MAP_FD:
- code_fmt = DST " = map_fd(%2$#" PRIx64 ")";
- break;
- default:
- code_fmt = DST " = ld_pseudo(%3$d, %2$#" PRIx64 ")";
- break;
- }
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn i2;
+ uint64_t imm64;
+
+ if (start + sizeof(struct bpf_insn) > end)
+ {
+ start -= sizeof(struct bpf_insn);
+ *startp = start;
+ goto done;
+ }
+ memcpy(&i2, start, sizeof(struct bpf_insn));
+ if (need_bswap)
+ bswap_bpf_insn (&i2);
+ start += sizeof(struct bpf_insn);
+ addr += sizeof(struct bpf_insn);
+
+ imm64 = (uint32_t)i.imm | ((uint64_t)i2.imm << 32);
+ switch (i.src_reg)
+ {
+ case 0:
+ code_fmt = REG(1) " = %2$#" PRIx64;
+ break;
+ case BPF_PSEUDO_MAP_FD:
+ code_fmt = REG(1) " = map_fd(%2$#" PRIx64 ")";
+ break;
+ default:
+ code_fmt = REG(1) " = ld_pseudo(%3$d, %2$#" PRIx64 ")";
+ break;
+ }
+ len = snprintf(buf, sizeof(buf), code_fmt,
+ i.dst_reg, imm64, i.src_reg);
+ }
+ break;
+
+ case BPF_JMP | BPF_EXIT:
+ len = snprintf(buf, sizeof(buf), "exit");
+ break;
+ case BPF_JMP | BPF_JA:
+ len = snprintf(buf, sizeof(buf), "goto " JMP(1), jmp);
+ break;
+ case BPF_JMP | BPF_CALL:
+ code_fmt = "call " IMMS(1);
+ goto do_imm;
+
+ case BPF_ALU | BPF_END | BPF_TO_LE:
+ /* The imm field contains {16,32,64}. */
+ code_fmt = REG(1) " = le" IMMS(2) "(" REG(1) ")";
+ goto do_dst_imm;
+ case BPF_ALU | BPF_END | BPF_TO_BE:
+ code_fmt = REG(1) " = be" IMMS(2) "(" REG(1) ")";
+ goto do_dst_imm;
+
+ case BPF_ALU | BPF_ADD | BPF_K:
+ code_fmt = A32(+, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ code_fmt = A32(-, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ code_fmt = A32(*, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ code_fmt = A32(/, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_OR | BPF_K:
+ code_fmt = A32(|, IMMX(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_AND | BPF_K:
+ code_fmt = A32(&, IMMX(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ code_fmt = A32(<<, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ code_fmt = A32(>>, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ code_fmt = A32(%%, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ code_fmt = A32(^, IMMX(2));
+ goto do_dst_imm;
+ case BPF_ALU | BPF_MOV | BPF_K:
+ code_fmt = REG(1) " = " IMMX(2);
+ goto do_dst_imm;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ code_fmt = REG(1) " = (u32)((s32)" REG(1) " >> " IMMS(2) ")";
+ goto do_dst_imm;
+
+ case BPF_ALU | BPF_ADD | BPF_X:
+ code_fmt = A32(+, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ code_fmt = A32(-, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ code_fmt = A32(*, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ code_fmt = A32(/, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_OR | BPF_X:
+ code_fmt = A32(|, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_AND | BPF_X:
+ code_fmt = A32(&, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ code_fmt = A32(<<, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ code_fmt = A32(>>, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ code_fmt = A32(%%, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ code_fmt = A32(^, REGU(2));
+ goto do_dst_src;
+ case BPF_ALU | BPF_MOV | BPF_X:
+ code_fmt = REG(1) " = " REGU(2);
+ goto do_dst_src;
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ code_fmt = REG(1) " = (u32)((s32)" REG(1) " >> " REG(2) ")";
+ goto do_dst_src;
+
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ code_fmt = A64(+, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ code_fmt = A64(-, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ code_fmt = A64(*, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ code_fmt = A64(/, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ code_fmt = A64(|, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ code_fmt = A64(&, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ code_fmt = A64(<<, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ code_fmt = A64(>>, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ code_fmt = A64(%%, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ code_fmt = A64(^, IMMS(2));
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ code_fmt = REG(1) " = " IMMS(2);
+ goto do_dst_imm;
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ code_fmt = REG(1) " = (s64)" REG(1) " >> " IMMS(2);
+ goto do_dst_imm;
+
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ code_fmt = A64(+, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ code_fmt = A64(-, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ code_fmt = A64(*, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ code_fmt = A64(/, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ code_fmt = A64(|, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ code_fmt = A64(&, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ code_fmt = A64(<<, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ code_fmt = A64(>>, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ code_fmt = A64(%%, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ code_fmt = A64(^, REG(2));
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ code_fmt = REG(1) " = " REG(2);
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ code_fmt = REG(1) " = (s64)" REG(1) " >> " REG(2);
+ goto do_dst_src;
+
+ case BPF_ALU | BPF_NEG:
+ code_fmt = REG(1) " = (u32)-" REG(1);
+ goto do_dst_src;
+ case BPF_ALU64 | BPF_NEG:
+ code_fmt = REG(1) " = -" REG(1);
+ goto do_dst_src;
+
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ code_fmt = J64(REG(1), ==, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JGT | BPF_K:
+ code_fmt = J64(REG(1), >, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ code_fmt = J64(REG(1), >=, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ code_fmt = J64(REG(1), &, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ code_fmt = J64(REG(1), !=, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ code_fmt = J64(REGS(1), >, IMMS(2));
+ goto do_dst_imm_jmp;
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ code_fmt = J64(REGS(1), >=, IMMS(2));
+ goto do_dst_imm_jmp;
+
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ code_fmt = J64(REG(1), ==, REG(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JGT | BPF_X:
+ code_fmt = J64(REG(1), >, REG(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JGE | BPF_X:
+ code_fmt = J64(REG(1), >=, REG(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ code_fmt = J64(REG(1), &, REG(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JNE | BPF_X:
+ code_fmt = J64(REG(1), !=, REG(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ code_fmt = J64(REGS(1), >, REGS(2));
+ goto do_dst_src_jmp;
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ code_fmt = J64(REGS(1), >=, REGS(2));
+ goto do_dst_src_jmp;
+
+ case BPF_LDX | BPF_MEM | BPF_B:
+ code_fmt = LOAD(u8);
+ goto do_dst_src_off;
+ case BPF_LDX | BPF_MEM | BPF_H:
+ code_fmt = LOAD(u16);
+ goto do_dst_src_off;
+ case BPF_LDX | BPF_MEM | BPF_W:
+ code_fmt = LOAD(u32);
+ goto do_dst_src_off;
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ code_fmt = LOAD(u64);
+ goto do_dst_src_off;
+
+ case BPF_STX | BPF_MEM | BPF_B:
+ code_fmt = STORE(u8, REG(2));
+ goto do_dst_src_off;
+ case BPF_STX | BPF_MEM | BPF_H:
+ code_fmt = STORE(u16, REG(2));
+ goto do_dst_src_off;
+ case BPF_STX | BPF_MEM | BPF_W:
+ code_fmt = STORE(u32, REG(2));
+ goto do_dst_src_off;
+ case BPF_STX | BPF_MEM | BPF_DW:
+ code_fmt = STORE(u64, REG(2));
+ goto do_dst_src_off;
+
+ case BPF_STX | BPF_XADD | BPF_W:
+ code_fmt = XADD(u32, REG(2));
+ goto do_dst_src_off;
+ case BPF_STX | BPF_XADD | BPF_DW:
+ code_fmt = XADD(u64, REG(2));
+ goto do_dst_src_off;
+
+ case BPF_ST | BPF_MEM | BPF_B:
+ code_fmt = STORE(u8, IMMS(2));
+ goto do_dst_imm_off;
+ case BPF_ST | BPF_MEM | BPF_H:
+ code_fmt = STORE(u16, IMMS(2));
+ goto do_dst_imm_off;
+ case BPF_ST | BPF_MEM | BPF_W:
+ code_fmt = STORE(u32, IMMS(2));
+ goto do_dst_imm_off;
+ case BPF_ST | BPF_MEM | BPF_DW:
+ code_fmt = STORE(u64, IMMS(2));
+ goto do_dst_imm_off;
+
+ case BPF_LD | BPF_ABS | BPF_B:
+ code_fmt = LDSKB(u8, IMMS(1));
+ goto do_imm;
+ case BPF_LD | BPF_ABS | BPF_H:
+ code_fmt = LDSKB(u16, IMMS(1));
+ goto do_imm;
+ case BPF_LD | BPF_ABS | BPF_W:
+ code_fmt = LDSKB(u32, IMMS(1));
+ goto do_imm;
+
+ case BPF_LD | BPF_IND | BPF_B:
+ code_fmt = LDSKB(u8, REG(1) "+" IMMS(2));
+ goto do_src_imm;
+ case BPF_LD | BPF_IND | BPF_H:
+ code_fmt = LDSKB(u16, REG(1) "+" IMMS(2));
+ goto do_src_imm;
+ case BPF_LD | BPF_IND | BPF_W:
+ code_fmt = LDSKB(u32, REG(1) "+" IMMS(2));
+ goto do_src_imm;
+
+ do_imm:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.imm);
+ break;
+ do_dst_imm:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.dst_reg, i.imm);
+ break;
+ do_src_imm:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.src_reg, i.imm);
+ break;
+ do_dst_src:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.dst_reg, i.src_reg);
+ break;
+ do_dst_imm_jmp:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.dst_reg, i.imm, jmp);
+ break;
+ do_dst_src_jmp:
len = snprintf(buf, sizeof(buf), code_fmt,
- i.dst_reg, imm64, i.src_reg);
- }
- else if (code_fmt != NULL)
- {
- jmp = addr + i.off * sizeof(struct bpf_insn);
- len = snprintf(buf, sizeof(buf), code_fmt, i.dst_reg, i.src_reg,
- i.imm, i.off, jmp);
- }
- else
- {
+ i.dst_reg, i.src_reg, jmp);
+ break;
+ do_dst_imm_off:
+ len = snprintf(buf, sizeof(buf), code_fmt, i.dst_reg, i.imm, i.off);
+ break;
+ do_dst_src_off:
+ len = snprintf(buf, sizeof(buf), code_fmt,
+ i.dst_reg, i.src_reg, i.off);
+ break;
+
+ default:
class = BPF_CLASS(code);
len = snprintf(buf, sizeof(buf), "invalid class %s",
class_string[class]);
+ break;
}
*startp = start;
diff --git a/tests/testfile-bpf-dis1.expect.bz2 b/tests/testfile-bpf-dis1.expect.bz2
index b4a778e03882796d5c0682e2ac46daf8ecf4f53c..21b55e94431e96914d38ad86683b54f29c58c617 100644
GIT binary patch
literal 1467
zcmZXUdsNbA7{`Bj$Gm`MGuJUNgfvSj^ODlQF$Yv$5Hv)MAv*76mR34NQzJE}pq9=Y
znpc!+3SMWlO3yY$E4&=FWLjqGl$9x~Q(a)S7ZPv`9O*Y~{d^PJ~>p6_#Buiz+mEP;Uz
zU|1!Y5r8j?mE3c%@YGGU4ghSC$&?(Hf#u<_hXiHXNjh!MHq#DarC+qD+}HO{jtc>4
zcjJN(Qh_j{3U(a)B}FJ{Q-w6E*{4SDIgBaJuK5T5mLAw+V;S%JnY*8I(?)^NlTYmzen
zSQ!J~keixR0Tk={m<v@!QD72_MuqObi1rOYD0X>s*}$XzVmKTwS3g(&qgn|`d)^ZT
zO(a)3(pD4ylRJZ`ouw4K6hggOE}*LCVi`-1D<f!w)kyqLvtwPd}3u`gjc?p~-w=VR8#
z(a)LRCRNQ4txn>7g1CxP$)HektZX0nR|+bf<OebX&z+aBtAI<BXa3UOl(a)35^@|adb!c
z=*sP?s>016Sy;x8`o3-VXJ?}wr)L?|>S&o@^bTL7p}ca!5SNH`YHN?#w)CXK9b<di
zci1B-uyR?xZ?e^mPUBN50h(=l*`y7hN+EN$@;`L#Rge2|P3{zdogaE{-u(QSKmuU$
zH~>I=3Z7|m1aYM;V(a)IB*g{@7{Ph?cLDBV8b0l(a)empoP#Tqgw+jt_t3w_!C?@u{2OL
zl+}mLX4;R2T%KEajoQ_0l}|0+<z?)on(SksnnJo(1O09J5kWXAdPR4nN{=Ft7Onr@
z=y0Ggm~xNeS~hw-SOBdc{q{@sk^R<Jrd;O5yiKO}J0+84bXSW9LlCWddsb&s)pf3d
zUR)Fp=8oc%QDq80p9n8<j6Zzm{<6OInOv9-kw|1LgQjik#}>qggPncGP768nai=)8
zqC~tz8Iy=(RzTLJjWEdco%?fRW70Y_i-b|*3{a^wY4urw`%qHlyk)klLpJhz!%`+Y
z`UXc{5N_Fz%~P;oTDmOeqc<ty^ANaRch+}u4swe_xhBEw0kVsOG`ZwCN<W!8JtnPE
z!S=jdQ3bad>AdvUM<Hx=!$Nz(=mV8YwaS~_Dy1wPYmmS&AYrQIFluGkyX75;H`R5j
zm)x)OmfT$iTv#MDEE|daSefSq%f?uHCdVzdmUPaR(a)KWG*l+?ugPAD^|P8I=Cq*<)N
zPm**O(8CW7%7Y&ei9x)~kQy7C?U{(;GXw=-+_oJ?LYZMS4Nhbtia-tBk$9-3sa=s8
zl<(1bIUeUa6~YFqLOT+kI$s+PO$4iFBMnHX=#r)~MnoBd<8bj{sB5}aY0IJtZZYuK
znU}k(a)792j&=Yu#lc&t~II;}3(aGm>^1CTb#;NV;t!zbyw9ff)zC57&%rP<z^G&T|R
zlTQZlpGE&q;xeE!#nw6S+HYkI4GZkdQ#07tGmh)s_Gr=CcxbY>$Yn_!TcdDB(a)S(d&
z+Eaf+@#T=UaBI(Vucn5}->%Ll;5IgKZqM>p=eaf@$g_FS9`50qtQ1BBnC|wha7@~q
zG4$_HNu7;_`K_?7edk|@L^4E<p6TYs5APpKJ3M-qju*zuPj$ER$LWAQ`0~as!dsI}
zn2zZTaLgGrrR7a6GnKhvz(q*a%5*!JP?ruu?Bfz6BO}4VNX4;KQ;j9wpKi2A>K(5x
z6mRrECe^ZdrA13pesZ6d$*bXLRqLx?M=+<Gt=g#FOLcan+Dm)w2mQ!f3UYL+PK>_@
zGUu_Y322^bAp0+xSq**;7qc+AwbYFsq(0OC^P*^VG;rZX#qdL+7DcFQ$n&sGrmp}N
z)ZJf{+&0{;g_m6agKm2?m=^YqvOJ0q=uQ_EM(pA16;93P=ET5<eu}CUm15TE<bVdv
b_h0}6^y@|4=%LGB&90Cd49&EC*d)Y%<KBuh
literal 1497
zcmZ`(X;70_6n#lpLclaZP-JmP$`T^bri2}wBm$KstU(NvXaRvF4V!{&vZ!EW&=E{z
zX+}ZT#0gq7NLf^tA|n_;F_BHQ6c9p7kx~R)`l0>nnfK$)n|t29_nbSA79Q_RqB1Q)
zn0pw%{0WT8j^w3d;<7%js{x>W|NeB$QKZ5Cty9DP!j*N2%)Z6IZF0{t*^rnSGBjF4
zcj)#E2n2`&)T&`lriAYBPCV<rDLzl906+$CVs5pJNEj)p^ef-_HB3XRM&p{Kh~p?;
z(a)Q8nZkX;4h4dcl$-TH!n(Y1gzuP0tTj5YTF_5m*r02r}fv0$Wnk-Ua51wdmo*$|J%
zJGrc{XWXBD*4gLVl2^uqxnEJJ&C2p^$KjE8wu5h2!3hy%I|dRir%bjcF!_Z|(##Ce
z8NJ7|ky5A6l+U^CsekDV_G;)**n27}D#~Sm1av3}j6RnUOjOh)*bTU{MKi~)elU{0
zds&5$=fT9KKn(+Ckdz9|+Y^}dl#iKN%QLSEMJi><@%>L8qZp0h_R9&XTFEx*<H+3?
zk>%ygd3)`Ybt4D5usJ7!xD^r8ej$b%8thHvz^D>bm9BdJQ6?onu^}xr>|h>Ro&*4J
zb+c=j^S9~)mLB~8obQ2V*VMb5Ev$XwJC)5^n+QFUE3Q?Xba?>4j&T6Pz}mcPh3&LA
zxn`vi-=Z2zitw4VgQR?`BK-5OvTn2zEE|j{)%kjZ7`v{45G=80uTE=U?_nv<rX%Pf
zoW(iKcumbI0CTk%Ge=7Ug9o=vU_rWiwwkyQn7Ewk3RYGVls3(lk-m>juuZi9mZTPh
z^*36bdc8R0ma_Clr7Y7C&{Mq4Bn`EsGjQJmEOw)Ep`GrO^s35`U0>;uw9)I~;jw&g
z3p#nwcwa(}m6A?O^a+s?Ip?v6_Hl(w$_>ZcBf;YP(a)6&>?#=jn_W)T0qsjj9Hb7?Az
z<Dz+IISHA>MAGCw{IeEL3ZZm4>&1HKH0!7-3UTY=Ks48N364j*;n1m<&RRFeIm?fW
zW-#Z}FBPCVcI#+a#Kz+BPsioIlVW4}Qm-y2qUjWkR>z8z^wW05IX^w2q?47AseL%K
zHhG-qIK0XBEanYe+Z&E{oWHyORGYcTCRr9I2tS57i$nTF8}yNiqqalMurQ>QW5g59
zXxmG9{T3nYJL5cupXp*~%PIt)!g^)0nP9|ka8SjRGb=XjG;8ea>^b_hMgg_^B|05N
z2t`#h;<(`oS^fWPD$g@*_R;fcueW`>xC1O(mcKzS4oTaY^M32DAG^LBDo(>d`V<6R
zaOV013+f0s4YirNwy?157mC=phUjR~TyPK;Lay2%R2O<$%3AN-7VP<#4ACJ%8IT8x
zmI+?V*g_wP&-aF$^zgJAtK7Z<(C_eNASRx<;I!m$!xIbByua_ItlV0AJ9bi$9(a)s2s
zkx7&Gp(P4oG*=5}7K4~yG%n2Vi5*-Wavx_$lAqoTvt=dZ=|t&%YS<@NND%7(BXrd#
z2zTE;hf;LZ$7b{%80t<eY*XDJc??)LCiTF*-MjSFl|36v6JhBSzP{}aBey44n%r7f
zOyS;UrRITI4*?&pol0>X1>f~6-Bz#2u4ejuJZ*}!*km`*nS4;{ZFZdp(a)Q0wSJIseL
z8YPE>>ID#+@`lJGZv<tnBQh$ceIut(a)IucLO(9)KTg~7$8Cd9oT3ypbws-}m=$vp<H
zC-+gQrk%rqKD-Yc8Eb~r-9;tvH|AEOs;*#6?)-7n<ofBfP}K`=nR)i&vP<#M6g`wt
z+6bdvM~_9yvhvg!Me4KG(a)FH-@#*!>4t+ycv>S{`>1nQ5F<8>p5yHk>@3D17Q&WWWy
mjS|MSu~72>jR%kmrWCMOBAFHa(470*WQo6?nMSB}7Ue&skb?gJ
--
2.7.4