bpf: support packet data load instructions

To fill the gap with the Linux kernel eBPF implementation,
add support for two non-generic instructions:
(BPF_ABS | <size> | BPF_LD) and (BPF_IND | <size> | BPF_LD),
which are used to access packet data.
These instructions can only be used when the BPF context is a pointer
to 'struct rte_mbuf' (i.e. the RTE_BPF_ARG_PTR_MBUF type).
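
For illustration, loading the 32-bit IPv4 source address at an absolute
offset can be encoded as below (an example only, mirroring one of the
new test-cases, not an instruction sequence added by this patch):

	/* R0 = 32-bit big-endian load from packet data at offset 'imm' */
	struct ebpf_insn ld_src = {
		.code = (BPF_LD | BPF_ABS | BPF_W),
		.imm = offsetof(struct rte_ipv4_hdr, src_addr),
	};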

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

@@ -43,6 +43,14 @@ struct dummy_net {
struct rte_ipv4_hdr ip_hdr;
};
#define DUMMY_MBUF_NUM 2
/* first mbuf in the packet, should always be at offset 0 */
struct dummy_mbuf {
struct rte_mbuf mb[DUMMY_MBUF_NUM];
uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE];
};
#define TEST_FILL_1 0xDEADBEEF
#define TEST_MUL_1 21
@@ -2444,6 +2452,413 @@ static const struct rte_bpf_xsym test_call5_xsym[] = {
},
};
/* load mbuf (BPF_ABS/BPF_IND) test-cases */
static const struct ebpf_insn test_ld_mbuf1_prog[] = {
/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_6,
.src_reg = EBPF_REG_1,
},
/* load IPv4 version and IHL */
{
.code = (BPF_LD | BPF_ABS | BPF_B),
.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
},
/* check IP version */
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_2,
.src_reg = EBPF_REG_0,
},
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_2,
.imm = 0xf0,
},
{
.code = (BPF_JMP | BPF_JEQ | BPF_K),
.dst_reg = EBPF_REG_2,
.imm = IPVERSION << 4,
.off = 2,
},
/* invalid IP version, return 0 */
{
.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_0,
},
{
.code = (BPF_JMP | EBPF_EXIT),
},
/* load 3rd byte of IP data */
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_0,
.imm = RTE_IPV4_HDR_IHL_MASK,
},
{
.code = (BPF_ALU | BPF_LSH | BPF_K),
.dst_reg = EBPF_REG_0,
.imm = 2,
},
{
.code = (BPF_LD | BPF_IND | BPF_B),
.src_reg = EBPF_REG_0,
.imm = 3,
},
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_7,
.src_reg = EBPF_REG_0,
},
/* load IPv4 src addr */
{
.code = (BPF_LD | BPF_ABS | BPF_W),
.imm = offsetof(struct rte_ipv4_hdr, src_addr),
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_7,
.src_reg = EBPF_REG_0,
},
/* load IPv4 total length */
{
.code = (BPF_LD | BPF_ABS | BPF_H),
.imm = offsetof(struct rte_ipv4_hdr, total_length),
},
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_8,
.src_reg = EBPF_REG_0,
},
/* load last 4 bytes of IP data */
{
.code = (BPF_LD | BPF_IND | BPF_W),
.src_reg = EBPF_REG_8,
.imm = -(int32_t)sizeof(uint32_t),
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_7,
.src_reg = EBPF_REG_0,
},
/* load 2 bytes from the middle of IP data */
{
.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
.dst_reg = EBPF_REG_8,
.imm = 1,
},
{
.code = (BPF_LD | BPF_IND | BPF_H),
.src_reg = EBPF_REG_8,
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_7,
},
{
.code = (BPF_JMP | EBPF_EXIT),
},
};
static void
dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len,
uint32_t data_len)
{
uint32_t i;
uint8_t *db;
mb->buf_addr = buf;
mb->buf_iova = (uintptr_t)buf;
mb->buf_len = buf_len;
rte_mbuf_refcnt_set(mb, 1);
/* set pool pointer to dummy value, test doesn't use it */
mb->pool = (void *)buf;
rte_pktmbuf_reset(mb);
db = (uint8_t *)rte_pktmbuf_append(mb, data_len);
for (i = 0; i != data_len; i++)
db[i] = i;
}
static void
test_ld_mbuf1_prepare(void *arg)
{
struct dummy_mbuf *dm;
struct rte_ipv4_hdr *ph;
const uint32_t plen = 400;
const struct rte_ipv4_hdr iph = {
.version_ihl = RTE_IPV4_VHL_DEF,
.total_length = rte_cpu_to_be_16(plen),
.time_to_live = IPDEFTTL,
.next_proto_id = IPPROTO_RAW,
.src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK),
.dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST),
};
dm = arg;
memset(dm, 0, sizeof(*dm));
dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]),
plen / 2 + 1);
dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]),
plen / 2 - 1);
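/* chain the halves so that loads past the first segment cross an mbuf boundary */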
rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]);
ph = rte_pktmbuf_mtod(dm->mb, typeof(ph));
memcpy(ph, &iph, sizeof(iph));
}
static uint64_t
test_ld_mbuf1(const struct rte_mbuf *pkt)
{
uint64_t n, v;
const uint8_t *p8;
const uint16_t *p16;
const uint32_t *p32;
struct dummy_offset dof;
/* load IPv4 version and IHL */
p8 = rte_pktmbuf_read(pkt,
offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8),
&dof);
if (p8 == NULL)
return 0;
/* check IP version */
if ((p8[0] & 0xf0) != IPVERSION << 4)
return 0;
n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER;
/* load 3rd byte of IP data */
p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof);
if (p8 == NULL)
return 0;
v = p8[0];
/* load IPv4 src addr */
p32 = rte_pktmbuf_read(pkt,
offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32),
&dof);
if (p32 == NULL)
return 0;
v += rte_be_to_cpu_32(p32[0]);
/* load IPv4 total length */
p16 = rte_pktmbuf_read(pkt,
offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16),
&dof);
if (p16 == NULL)
return 0;
n = rte_be_to_cpu_16(p16[0]);
/* load last 4 bytes of IP data */
p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof);
if (p32 == NULL)
return 0;
v += rte_be_to_cpu_32(p32[0]);
/* load 2 bytes from the middle of IP data */
p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof);
if (p16 == NULL)
return 0;
v += rte_be_to_cpu_16(p16[0]);
return v;
}
static int
test_ld_mbuf1_check(uint64_t rc, const void *arg)
{
const struct dummy_mbuf *dm;
uint64_t v;
dm = arg;
v = test_ld_mbuf1(dm->mb);
return cmp_res(__func__, v, rc, arg, arg, 0);
}
/*
 * same as ld_mbuf1, but truncate the mbuf by 1B,
 * so the load of the last 4B fails.
 */
static void
test_ld_mbuf2_prepare(void *arg)
{
struct dummy_mbuf *dm;
test_ld_mbuf1_prepare(arg);
dm = arg;
rte_pktmbuf_trim(dm->mb, 1);
}
static int
test_ld_mbuf2_check(uint64_t rc, const void *arg)
{
return cmp_res(__func__, 0, rc, arg, arg, 0);
}
/* same as test_ld_mbuf1, but now store intermediate results on the stack */
static const struct ebpf_insn test_ld_mbuf3_prog[] = {
/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_6,
.src_reg = EBPF_REG_1,
},
/* load IPv4 version and IHL */
{
.code = (BPF_LD | BPF_ABS | BPF_B),
.imm = offsetof(struct rte_ipv4_hdr, version_ihl),
},
/* check IP version */
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_2,
.src_reg = EBPF_REG_0,
},
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_2,
.imm = 0xf0,
},
{
.code = (BPF_JMP | BPF_JEQ | BPF_K),
.dst_reg = EBPF_REG_2,
.imm = IPVERSION << 4,
.off = 2,
},
/* invalid IP version, return 0 */
{
.code = (EBPF_ALU64 | BPF_XOR | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_0,
},
{
.code = (BPF_JMP | EBPF_EXIT),
},
/* load 3rd byte of IP data */
{
.code = (BPF_ALU | BPF_AND | BPF_K),
.dst_reg = EBPF_REG_0,
.imm = RTE_IPV4_HDR_IHL_MASK,
},
{
.code = (BPF_ALU | BPF_LSH | BPF_K),
.dst_reg = EBPF_REG_0,
.imm = 2,
},
{
.code = (BPF_LD | BPF_IND | BPF_B),
.src_reg = EBPF_REG_0,
.imm = 3,
},
{
.code = (BPF_STX | BPF_MEM | BPF_B),
.dst_reg = EBPF_REG_10,
.src_reg = EBPF_REG_0,
.off = (int16_t)(offsetof(struct dummy_offset, u8) -
sizeof(struct dummy_offset)),
},
/* load IPv4 src addr */
{
.code = (BPF_LD | BPF_ABS | BPF_W),
.imm = offsetof(struct rte_ipv4_hdr, src_addr),
},
{
.code = (BPF_STX | BPF_MEM | BPF_W),
.dst_reg = EBPF_REG_10,
.src_reg = EBPF_REG_0,
.off = (int16_t)(offsetof(struct dummy_offset, u32) -
sizeof(struct dummy_offset)),
},
/* load IPv4 total length */
{
.code = (BPF_LD | BPF_ABS | BPF_H),
.imm = offsetof(struct rte_ipv4_hdr, total_length),
},
{
.code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
.dst_reg = EBPF_REG_8,
.src_reg = EBPF_REG_0,
},
/* load last 4 bytes of IP data */
{
.code = (BPF_LD | BPF_IND | BPF_W),
.src_reg = EBPF_REG_8,
.imm = -(int32_t)sizeof(uint32_t),
},
{
.code = (BPF_STX | BPF_MEM | EBPF_DW),
.dst_reg = EBPF_REG_10,
.src_reg = EBPF_REG_0,
.off = (int16_t)(offsetof(struct dummy_offset, u64) -
sizeof(struct dummy_offset)),
},
/* load 2 bytes from the middle of IP data */
{
.code = (EBPF_ALU64 | BPF_RSH | BPF_K),
.dst_reg = EBPF_REG_8,
.imm = 1,
},
{
.code = (BPF_LD | BPF_IND | BPF_H),
.src_reg = EBPF_REG_8,
},
{
.code = (BPF_LDX | BPF_MEM | EBPF_DW),
.dst_reg = EBPF_REG_1,
.src_reg = EBPF_REG_10,
.off = (int16_t)(offsetof(struct dummy_offset, u64) -
sizeof(struct dummy_offset)),
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_1,
},
{
.code = (BPF_LDX | BPF_MEM | BPF_W),
.dst_reg = EBPF_REG_1,
.src_reg = EBPF_REG_10,
.off = (int16_t)(offsetof(struct dummy_offset, u32) -
sizeof(struct dummy_offset)),
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_1,
},
{
.code = (BPF_LDX | BPF_MEM | BPF_B),
.dst_reg = EBPF_REG_1,
.src_reg = EBPF_REG_10,
.off = (int16_t)(offsetof(struct dummy_offset, u8) -
sizeof(struct dummy_offset)),
},
{
.code = (EBPF_ALU64 | BPF_ADD | BPF_X),
.dst_reg = EBPF_REG_0,
.src_reg = EBPF_REG_1,
},
{
.code = (BPF_JMP | EBPF_EXIT),
},
};
/* all bpf test cases */
static const struct bpf_test tests[] = {
{
@@ -2704,6 +3119,54 @@ static const struct bpf_test tests[] = {
/* for now don't support function calls on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
{
.name = "test_ld_mbuf1",
.arg_sz = sizeof(struct dummy_mbuf),
.prm = {
.ins = test_ld_mbuf1_prog,
.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
.prog_arg = {
.type = RTE_BPF_ARG_PTR_MBUF,
.buf_size = sizeof(struct dummy_mbuf),
},
},
.prepare = test_ld_mbuf1_prepare,
.check_result = test_ld_mbuf1_check,
/* mbuf as input argument is not supported on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
{
.name = "test_ld_mbuf2",
.arg_sz = sizeof(struct dummy_mbuf),
.prm = {
.ins = test_ld_mbuf1_prog,
.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
.prog_arg = {
.type = RTE_BPF_ARG_PTR_MBUF,
.buf_size = sizeof(struct dummy_mbuf),
},
},
.prepare = test_ld_mbuf2_prepare,
.check_result = test_ld_mbuf2_check,
/* mbuf as input argument is not supported on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
{
.name = "test_ld_mbuf3",
.arg_sz = sizeof(struct dummy_mbuf),
.prm = {
.ins = test_ld_mbuf3_prog,
.nb_ins = RTE_DIM(test_ld_mbuf3_prog),
.prog_arg = {
.type = RTE_BPF_ARG_PTR_MBUF,
.buf_size = sizeof(struct dummy_mbuf),
},
},
.prepare = test_ld_mbuf1_prepare,
.check_result = test_ld_mbuf1_check,
/* mbuf as input argument is not supported on 32 bit platform */
.allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
},
};
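
For reference, a minimal sketch (error handling trimmed) of how such a
case can be driven through the public librte_bpf API; run_ld_mbuf1() is a
hypothetical helper, while rte_bpf_load(), rte_bpf_exec() and
rte_bpf_destroy() are the existing entry points used by this test harness:

static uint64_t
run_ld_mbuf1(struct dummy_mbuf *dm)
{
	struct rte_bpf_prm prm = {
		.ins = test_ld_mbuf1_prog,
		.nb_ins = RTE_DIM(test_ld_mbuf1_prog),
		.prog_arg = {
			.type = RTE_BPF_ARG_PTR_MBUF,
			.buf_size = sizeof(*dm),
		},
	};
	struct rte_bpf *bpf;
	uint64_t rc;

	test_ld_mbuf1_prepare(dm);
	bpf = rte_bpf_load(&prm);	/* verify + load the program */
	if (bpf == NULL)
		return 0;
	rc = rte_bpf_exec(bpf, dm->mb);	/* run it against the chained mbufs */
	rte_bpf_destroy(bpf);
	return rc;
}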
static int


@@ -27,6 +27,35 @@ The library API provides the following basic operations:
* Load BPF program from the ELF file and install callback to execute it on given ethdev port/queue.
Packet data load instructions
-----------------------------
DPDK supports two non-generic instructions: ``(BPF_ABS | size | BPF_LD)``
and ``(BPF_IND | size | BPF_LD)``, which are used to access packet data.
These instructions can only be used when the execution context is a pointer to
``struct rte_mbuf``; they have seven implicit operands.
Register ``R6`` is an implicit input that must contain a pointer to the
``rte_mbuf``.
Register ``R0`` is an implicit output which contains the data fetched from the
packet. Registers ``R1-R5`` are scratch registers
and must not be used to store data across these instructions.
These instructions also have an implicit program-exit condition: when an
eBPF program tries to access data beyond the packet boundary,
the interpreter aborts execution of the program. JIT compilers
must therefore preserve this property. The ``src_reg`` and ``imm32`` fields are
explicit inputs to these instructions.
For example, ``(BPF_IND | BPF_W | BPF_LD)`` means:

.. code-block:: c

   uint32_t tmp;
   R0 = rte_pktmbuf_read((const struct rte_mbuf *)R6, src_reg + imm32,
        sizeof(tmp), &tmp);
   if (R0 == NULL) return FAILED;
   R0 = ntohl(*(uint32_t *)R0);

and ``R1-R5`` are scratched.
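
By analogy, ``(BPF_ABS | BPF_H | BPF_LD)`` can be read as the sketch below
(same conventions as the example above):

.. code-block:: c

   uint16_t tmp;
   R0 = rte_pktmbuf_read((const struct rte_mbuf *)R6, imm32,
        sizeof(tmp), &tmp);
   if (R0 == NULL) return FAILED;
   R0 = ntohs(*(uint16_t *)R0);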
Not currently supported eBPF features
-------------------------------------
@@ -34,5 +63,4 @@ Not currently supported eBPF features
- cBPF
- tail-pointer call
- eBPF MAP
- skb
- external function calls for 32-bit platforms


@@ -68,6 +68,13 @@ New Features
* Added new PMD devarg ``reclaim_mem_mode``.
* **Added support for BPF_ABS/BPF_IND load instructions.**
Added support for two BPF non-generic instructions:
``(BPF_ABS | <size> | BPF_LD)`` and ``(BPF_IND | <size> | BPF_LD)``,
which are used to access packet data in a safe manner. Currently, JIT
support for these instructions is implemented for x86 only.
Removed Items
-------------


@@ -74,6 +74,26 @@
(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
reg[ins->src_reg]))
/* BPF_LD | BPF_ABS/BPF_IND */
#define NOP(x) (x)
#define BPF_LD_ABS(bpf, reg, ins, type, op) do { \
const type *p = bpf_ld_mbuf(bpf, reg, ins, (ins)->imm, sizeof(type)); \
if (p == NULL) \
return 0; \
reg[EBPF_REG_0] = op(p[0]); \
} while (0)
#define BPF_LD_IND(bpf, reg, ins, type, op) do { \
uint32_t ofs = reg[ins->src_reg] + (ins)->imm; \
const type *p = bpf_ld_mbuf(bpf, reg, ins, ofs, sizeof(type)); \
if (p == NULL) \
return 0; \
reg[EBPF_REG_0] = op(p[0]); \
} while (0)
static inline void
bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
{
@@ -112,6 +132,23 @@ bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
}
}
static inline const void *
bpf_ld_mbuf(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM],
const struct ebpf_insn *ins, uint32_t off, uint32_t len)
{
const struct rte_mbuf *mb;
const void *p;
mb = (const struct rte_mbuf *)(uintptr_t)reg[EBPF_REG_6];
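/* reuse the slot of output register R0 as the copy buffer;
 * rte_pktmbuf_read() copies into it only when the requested bytes
 * span mbuf segments, otherwise it returns a direct pointer
 */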
p = rte_pktmbuf_read(mb, off, len, reg + EBPF_REG_0);
if (p == NULL)
RTE_BPF_LOG(DEBUG, "%s(bpf=%p, mbuf=%p, ofs=%u, len=%u): "
"load beyond packet boundary at pc: %#zx;\n",
__func__, bpf, mb, off, len,
(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins);
return p;
}
static inline uint64_t
bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
{
@@ -296,6 +333,26 @@ bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
(uint64_t)(uint32_t)ins[1].imm << 32;
ins++;
break;
/* load absolute instructions */
case (BPF_LD | BPF_ABS | BPF_B):
BPF_LD_ABS(bpf, reg, ins, uint8_t, NOP);
break;
case (BPF_LD | BPF_ABS | BPF_H):
BPF_LD_ABS(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
break;
case (BPF_LD | BPF_ABS | BPF_W):
BPF_LD_ABS(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
break;
/* load indirect instructions */
case (BPF_LD | BPF_IND | BPF_B):
BPF_LD_IND(bpf, reg, ins, uint8_t, NOP);
break;
case (BPF_LD | BPF_IND | BPF_H):
BPF_LD_IND(bpf, reg, ins, uint16_t, rte_be_to_cpu_16);
break;
case (BPF_LD | BPF_IND | BPF_W):
BPF_LD_IND(bpf, reg, ins, uint32_t, rte_be_to_cpu_32);
break;
/* store instructions */
case (BPF_STX | BPF_MEM | BPF_B):
BPF_ST_REG(reg, ins, uint8_t);


@@ -102,6 +102,9 @@ struct bpf_ins_check {
#define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
#define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
/* For LD_IND R6 is an implicit CTX register. */
#define IND_SRC_REGS (WRT_REGS ^ 1 << EBPF_REG_6)
/*
* check and evaluate functions for particular instruction types.
*/
@@ -580,6 +583,42 @@ eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
rd->s.min = RTE_MIN(sx, sy);
}
static const char *
eval_ld_mbuf(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
{
uint32_t i, mode;
struct bpf_reg_val *rv, ri, rs;
mode = BPF_MODE(ins->code);
/* R6 is an implicit input that must contain pointer to mbuf */
if (bvf->evst->rv[EBPF_REG_6].v.type != RTE_BPF_ARG_PTR_MBUF)
return "invalid type for implicit ctx register";
if (mode == BPF_IND) {
rs = bvf->evst->rv[ins->src_reg];
if (rs.v.type != RTE_BPF_ARG_RAW)
return "unexpected type for src register";
eval_fill_imm(&ri, UINT64_MAX, ins->imm);
eval_add(&rs, &ri, UINT64_MAX);
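/* the effective offset (src_reg + imm) must be non-negative and fit in 32 bits */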
if (rs.s.max < 0 || rs.u.min > UINT32_MAX)
return "mbuf boundary violation";
}
/* R1-R5 scratch registers */
for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
/* R0 is an implicit output, contains data fetched from the packet */
rv = bvf->evst->rv + EBPF_REG_0;
rv->v.size = bpf_size(BPF_SIZE(ins->code));
eval_fill_max_bound(rv, RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t));
return NULL;
}
/*
* check that destination and source operand are in defined state.
*/
@@ -1425,6 +1464,44 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
.imm = { .min = 0, .max = UINT32_MAX},
.eval = eval_ld_imm64,
},
/* load absolute instructions */
[(BPF_LD | BPF_ABS | BPF_B)] = {
.mask = {.dreg = ZERO_REG, .sreg = ZERO_REG},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = INT32_MAX},
.eval = eval_ld_mbuf,
},
[(BPF_LD | BPF_ABS | BPF_H)] = {
.mask = {.dreg = ZERO_REG, .sreg = ZERO_REG},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = INT32_MAX},
.eval = eval_ld_mbuf,
},
[(BPF_LD | BPF_ABS | BPF_W)] = {
.mask = {.dreg = ZERO_REG, .sreg = ZERO_REG},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = INT32_MAX},
.eval = eval_ld_mbuf,
},
/* load indirect instructions */
[(BPF_LD | BPF_IND | BPF_B)] = {
.mask = {.dreg = ZERO_REG, .sreg = IND_SRC_REGS},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = UINT32_MAX},
.eval = eval_ld_mbuf,
},
[(BPF_LD | BPF_IND | BPF_H)] = {
.mask = {.dreg = ZERO_REG, .sreg = IND_SRC_REGS},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = UINT32_MAX},
.eval = eval_ld_mbuf,
},
[(BPF_LD | BPF_IND | BPF_W)] = {
.mask = {.dreg = ZERO_REG, .sreg = IND_SRC_REGS},
.off = { .min = 0, .max = 0},
.imm = { .min = 0, .max = UINT32_MAX},
.eval = eval_ld_mbuf,
},
/* store REG instructions */
[(BPF_STX | BPF_MEM | BPF_B)] = {
.mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},