// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"

#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)

static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};
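
/*
 * The JIT makes several passes over the program. The helpers below record,
 * while building the body, whether the program performs calls (SAVE_RA)
 * and/or tail calls (SAVE_TCC); the prologue and epilogue emitted on later
 * passes use these flags to decide what must be preserved. When both kinds
 * of call are present, the live tail call count is mirrored into TCC_SAVED
 * ($s5), a callee-saved register, so that it survives helper calls.
 */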

static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}

/*
 * eBPF prog stack layout:
 *
 *                                        high
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 *                                        low
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;

	/*
	 * The first instruction initializes the tail call count (TCC).
	 * On a tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}
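
/*
 * Note on the tail call exit below: the jirl offset is encoded in units of
 * 4-byte instructions, so jumping with an offset of 1 enters the next
 * program at bpf_func + 4, skipping the single TCC-initialization
 * instruction that build_prologue() emits first.
 */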

static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

/*
 * Kfuncs in loaded modules can live farther than S32_MAX away from
 * __bpf_call_base, so 64-bit (far) kfunc addresses must be supported;
 * the BPF core handles the rest once this returns true.
 */
bool bpf_jit_supports_far_kfunc_call(void)
{
	return true;
}

/* initialized on the first pass of build_body() */
static int out_offset = -1;
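
/*
 * Every pass of the JIT must emit the same number of instructions ahead of
 * the tail call "out" label: jmp_offset is computed from out_offset before
 * the label is reached, so emit_bpf_tail_call() verifies that cur_offset
 * matches the value recorded on the first pass.
 */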

static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))

	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}
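
/*
 * The LoongArch AM* instructions perform the read-modify-write atomically
 * and write the old memory value into their destination register (t2 as a
 * scratch for the plain forms, src for the BPF_FETCH forms, which is how
 * the old value is handed back). BPF_CMPXCHG has no single instruction, so
 * an LL/SC retry loop is emitted instead; its hard-coded branch offsets are
 * counted in 4-byte instructions relative to the branch itself.
 */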
static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			/* forward 4 insns: skip the store on a compare mismatch */
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			/* back 4 insns: retry from ll.d if sc.d failed */
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			/* back 6 insns: retry from ll.w if sc.w failed */
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

/*
 * BPF_PROBE_MEM loads (tracing programs reading arbitrary memory) are
 * handled by appending an exception table to the JITed program: if such a
 * load faults, the fixup clears the destination register and resumes after
 * the faulting instruction. To keep the compact exception table entry
 * format, fixup_exception() inspects the pc; a more generic solution would
 * add a "handler" field to the entry, like on x86, s390 and arm64.
 */
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	if (!ctx->image || !ctx->prog->aux->extable)
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed, sign_extend;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		switch (off) {
		case 0:
			move_reg(ctx, dst, src);
			emit_zext_32(ctx, dst, is32);
			break;
		case 8:
			move_reg(ctx, t1, src);
			emit_insn(ctx, extwb, dst, t1);
			emit_zext_32(ctx, dst, is32);
			break;
		case 16:
			move_reg(ctx, t1, src);
			emit_insn(ctx, extwh, dst, t1);
			emit_zext_32(ctx, dst, is32);
			break;
		case 32:
			emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
			break;
		}
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		if (!off) {
			emit_zext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_zext_32(ctx, t1, is32);
			emit_insn(ctx, divdu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			emit_sext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_sext_32(ctx, t1, is32);
			emit_insn(ctx, divd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		if (!off) {
			move_imm(ctx, t1, imm, is32);
			emit_zext_32(ctx, dst, is32);
			emit_insn(ctx, divdu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			move_imm(ctx, t1, imm, false);
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, dst, is32);
			emit_insn(ctx, divd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		if (!off) {
			emit_zext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_zext_32(ctx, t1, is32);
			emit_insn(ctx, moddu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			emit_sext_32(ctx, dst, is32);
			move_reg(ctx, t1, src);
			emit_sext_32(ctx, t1, is32);
			emit_insn(ctx, modd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		if (!off) {
			move_imm(ctx, t1, imm, is32);
			emit_zext_32(ctx, dst, is32);
			emit_insn(ctx, moddu, dst, dst, t1);
			emit_zext_32(ctx, dst, is32);
		} else {
			move_imm(ctx, t1, imm, false);
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, dst, is32);
			emit_insn(ctx, modd, dst, dst, t1);
			emit_sext_32(ctx, dst, is32);
		}
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* clear the upper 32 bits */
			emit_zext_32(ctx, dst, true);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;
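
	/*
	 * For the conditional jumps below, the operands are first copied
	 * into t1/t2 (or tm) and sign- or zero-extended from 32 bits when
	 * the instruction is a BPF_JMP32 one, so a single 64-bit
	 * compare-and-branch works for both widths and both signednesses.
	 */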
	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
	case BPF_JMP32 | BPF_JA:
		if (BPF_CLASS(code) == BPF_JMP)
			jmp_offset = bpf2la_offset(i, off, ctx);
		else
			jmp_offset = bpf2la_offset(i, imm, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		/*
		 * A 64-bit immediate load spans two BPF instruction slots,
		 * and this is the only place (insn + 1) may be dereferenced:
		 * doing so unconditionally could run past the last page of
		 * the program and fault. Returning 1 makes build_body()
		 * skip the second slot.
		 */
		const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;

		move_imm(ctx, dst, imm64, is32);
		return 1;
	}

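	/*
	 * The BPF_PROBE_MEM{,SX} variants below emit the same loads as the
	 * plain BPF_MEM forms; the difference is handled afterwards by
	 * add_exception_handler(), which records a fixup so that a faulting
	 * access zeroes dst and skips the load instead of oopsing.
	 */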
	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	/* dst_reg = (s64)*(signed size *)(src_reg + off) */
	case BPF_LDX | BPF_MEMSX | BPF_B:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
		sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
			      BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldb, dst, src, off);
				else
					emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxb, dst, src, t1);
				else
					emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldh, dst, src, off);
				else
					emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxh, dst, src, t1);
				else
					emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldw, dst, src, off);
				else
					emit_insn(ctx, ldwu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxw, dst, src, t1);
				else
					emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			/*
			 * Always use the register-offset form here: the
			 * immediate forms (ld.d/ldptr.d) sign-extend their
			 * operand, which produced wrong addresses for loads
			 * such as task->cgroups->dfl_cgrp (see "LoongArch:
			 * BPF: Don't sign extend memory load operand").
			 */
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

|
|
|
|
	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;
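
	/* dst = dst <op> src (atomic); the lowering lives in emit_atomic() */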
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

2023-03-28 00:13:35 -07:00
	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

2022-10-12 01:36:20 -07:00
	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		/* On the sizing pass (no image yet), record where each
		 * BPF instruction starts in the JITed image. */
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			/* The instruction occupied two BPF slots
			 * (e.g. a 16-byte ld_imm64); skip the second. */
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Any INSN_BREAK left over from jit_fill_hole() marks a
		 * hole that was never overwritten with real code. */
		if (insn.word == INSN_BREAK)
			return -1;
	}

2022-12-10 07:39:59 -07:00
	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
		return -1;

2022-10-12 01:36:20 -07:00
	return 0;
}

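/*
 * JIT driver.  Pass 1 runs build_prologue()/build_body() with
 * ctx.image == NULL purely to size the image and fill ctx.offset[];
 * pass 2 re-runs them to emit into the allocated buffer; pass 3
 * (validate_code) only checks that no INSN_BREAK padding survived.
 */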
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
2022-12-10 07:39:59 -07:00
	int image_size, prog_size, extable_size;
2022-10-12 01:36:20 -07:00
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
2022-12-10 07:39:59 -07:00
		prog_size = sizeof(u32) * ctx.idx;
2022-10-12 01:36:20 -07:00
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

2022-12-10 07:39:59 -07:00
	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);

2022-10-12 01:36:20 -07:00
	/* Now we know the actual image size.
	 * As each LoongArch instruction is 32 bits wide, we translate
	 * the number of JITed instructions into the size required to
	 * store the JITed code.
	 */
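	/* e.g. ctx.idx == 1000 emitted instructions -> prog_size = 4000
	 * bytes, with the exception table appended after the code. */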
2022-12-10 07:39:59 -07:00
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;

2022-10-12 01:36:20 -07:00
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
2022-12-10 07:39:59 -07:00
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;

2022-10-12 01:36:20 -07:00
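	/*
	 * For a multi-function program the verifier calls the JIT once
	 * more (extra_pass) after all subprogram addresses are known;
	 * that pass reuses the context saved in jit_data and re-emits
	 * over the same image, so it jumps here past the sizing work.
	 */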
skip_init_ctx:
	ctx.idx = 0;
2022-12-10 07:39:59 -07:00
	ctx.num_exentries = 0;
2022-10-12 01:36:20 -07:00

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
2022-12-10 07:39:59 -07:00
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
2022-10-12 01:36:20 -07:00

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
2024-03-07 22:38:08 -07:00
		int err;

2022-10-12 01:36:20 -07:00
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
2024-03-07 22:38:08 -07:00
			goto out_free;
		}

		err = bpf_jit_binary_lock_ro(header);
		if (err) {
			pr_err_once("bpf_jit_binary_lock_ro() returned %d\n",
				    err);
			goto out_free;
2022-10-12 01:36:20 -07:00
		}
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
2022-12-10 07:39:59 -07:00
	prog->jited_len = prog_size;
2022-10-12 01:36:20 -07:00
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;
2024-03-07 22:38:08 -07:00

out_free:
	bpf_jit_binary_free(header);
	prog->bpf_func = NULL;
	prog->jited = 0;
	prog->jited_len = 0;
	goto out_offset;
2022-10-12 01:36:20 -07:00
}

2023-02-18 03:53:17 -07:00

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}