ARC, bpf: Fix issues reported by the static analyzers
Also updated a couple of comments along the way. One of the issues
reported was indeed a bug in the code:

  memset(ctx, 0, sizeof(ctx))	// original line
  memset(ctx, 0, sizeof(*ctx))	// fixed line

That was a nice catch.

Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202405222314.UG5F2NHn-lkp@intel.com/
Closes: https://lore.kernel.org/oe-kbuild-all/202405232036.Xqoc3b0J-lkp@intel.com/
Signed-off-by: Shahab Vahedi <shahab@synopsys.com>
Link: https://lore.kernel.org/r/20240525035628.1026-1-list+bpf@vahedi.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit dd6a403795
parent 590016ad83

@@ -39,7 +39,7 @@
 
 /************** Functions that the back-end must provide **************/
 /* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
 /***** Moves *****/
 u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
 u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);

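Why dropping "inline" matters, a minimal sketch rather than the kernel
build (the file and function names below are made up, and this is
plausibly the class of issue the analyzers flagged): in C99 and later,
an "inline" definition with neither "static" nor "extern" does not
provide an external definition of the function, so a call site the
compiler chooses not to inline can fail to link.

  /* sketch.c -- build with: gcc -std=c99 -O0 sketch.c */
  #include <stdio.h>

  /*
   * inline int twice(int x) { return 2 * x; }
   *
   * The definition above would emit no external symbol in C99; at -O0
   * the call below then fails to link ("undefined reference").
   * Dropping "inline", as done for zext() in this hunk, always gives
   * a normal external definition.
   */
  int twice(int x)
  {
      return 2 * x;
  }

  int main(void)
  {
      printf("%d\n", twice(21));    /* prints 42 */
      return 0;
  }
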
@@ -62,7 +62,7 @@ enum {
  * If/when we decide to add ARCv2 instructions that do use register pairs,
  * the mapping, hopefully, doesn't need to be revisited.
  */
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
 	/* Return value from in-kernel function, and exit value from eBPF */
 	[BPF_REG_0] = {ARC_R_8, ARC_R_9},
 	/* Arguments from eBPF program to in-kernel function */

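The const-to-static-const change here (and the identical ones in the two
later hunks, for the jcc64 template and arcv2_32_jmps) addresses the
"symbol was not declared. Should it be static?" class of sparse warning:
a file-scope object without "static" gets external linkage even when
nothing outside the file uses it. A minimal sketch with made-up table
names:

  /* sketch.c -- check with: sparse sketch.c */
  #include <stdio.h>

  const int tab_warns[2] = {1, 2};      /* external linkage: sparse warns */
  static const int tab_ok[2] = {3, 4};  /* internal linkage: no warning   */

  int main(void)
  {
      printf("%d %d\n", tab_warns[0], tab_ok[1]);
      return 0;
  }
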
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
 
 /************* Packers (Deal with BPF_REGs) **************/
 
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
 {
 	if (rd != BPF_REG_FP)
 		return arc_movi_r(buf, REG_HI(rd), 0);

@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
 			break;
 		default:
 			/* The caller must have handled this. */
+			break;
 		}
 	} else {
 		/*

@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
 			break;
 		default:
 			/* The caller must have handled this. */
+			break;
 		}
 	}
 
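Both gen_swap() hunks add the same no-op "break;". The reason, sketched
below with an invented classify() helper: a label must be attached to a
statement, and a comment is not one, so a "default:" sitting right
before the closing brace is rejected ("label at end of compound
statement") by compilers prior to C23.

  /* sketch.c -- gcc -std=c11 sketch.c */
  #include <stdio.h>

  static const char *classify(int size)
  {
      switch (size) {
      case 1:
          return "byte";
      default:
          /* The caller must have handled this. */
          break;    /* without this statement, gcc errors out */
      }
      return "unhandled";
  }

  int main(void)
  {
      printf("%s %s\n", classify(1), classify(8));
      return 0;
  }
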
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
 #define JCC64_NR_OF_JMPS 3	/* Number of jumps in jcc64 template. */
 #define JCC64_INSNS_TO_END 3	/* Number of insn. inclusive the 2nd jmp to end. */
 #define JCC64_SKIP_JMP 1	/* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
 	/*
 	 * "jit_off" is common between all "jmp[]" and is coupled with
 	 * "cond" of each "jmp[]" instance. e.g.:

@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
 	 * The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
 	 * instruction that precedes the conditional branch.
 	 */
-	const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+	static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
 		[ARC_CC_UGT] = CC_great_u,
 		[ARC_CC_UGE] = CC_great_eq_u,
 		[ARC_CC_ULT] = CC_less_u,

@@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
 /* Initialise the context so there's no garbage. */
 static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
 {
-	memset(ctx, 0, sizeof(ctx));
+	memset(ctx, 0, sizeof(*ctx));
 
 	ctx->orig_prog = prog;
 
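This is the bug called out in the commit message: with a pointer
argument, sizeof(ctx) is the size of the pointer (8 bytes on a 64-bit
kernel), so only the first few bytes of the context were being zeroed.
A standalone sketch, using a made-up stand-in for struct jit_context:

  /* sketch.c -- gcc sketch.c && ./a.out */
  #include <stdio.h>
  #include <string.h>

  struct demo_ctx { int a; long b[8]; };    /* stand-in struct */

  int main(void)
  {
      struct demo_ctx c, *ctx = &c;

      printf("sizeof(ctx)  = %zu\n", sizeof(ctx));   /* 8: pointer size      */
      printf("sizeof(*ctx) = %zu\n", sizeof(*ctx));  /* 72 on LP64: whole struct */

      memset(ctx, 0, sizeof(*ctx));    /* the fixed form clears it all */
      return 0;
  }
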
@@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
 	ctx->prog = bpf_jit_blind_constants(prog);
 	if (IS_ERR(ctx->prog))
 		return PTR_ERR(ctx->prog);
-	ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+	ctx->blinded = (ctx->prog != ctx->orig_prog);
 
 	/* If the verifier doesn't zero-extend, then we have to do it. */
 	ctx->do_zext = !ctx->prog->aux->verifier_zext;

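The rewritten test leans on pointer identity: bpf_jit_blind_constants()
hands back the original prog when no blinding is needed, so "did
blinding happen?" reduces to a plain pointer comparison and the
redundant "? false : true" goes away. A small sketch of the idiom,
where maybe_clone() is an invented stand-in:

  #include <stdbool.h>
  #include <stdio.h>

  /* Returns its input untouched when there is nothing to do, mirroring
   * how bpf_jit_blind_constants() behaves when blinding is disabled. */
  static int *maybe_clone(int *p, bool need_copy)
  {
      static int copy;

      if (!need_copy)
          return p;
      copy = *p;
      return &copy;
  }

  int main(void)
  {
      int x = 42;
      int *q = maybe_clone(&x, false);
      bool blinded = (q != &x);    /* the simplified test from the hunk */

      printf("blinded = %d\n", blinded);    /* prints 0 */
      return 0;
  }
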
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
 }
 
 /*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
  */
 static int jit_compile(struct jit_context *ctx)
 {

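The rewritten comment describes a classic two-pass JIT: jit_prepare()
runs the whole emitter with no output buffer just to measure, memory is
then allocated, and jit_compile() emits for real and cross-checks the
two lengths. A freestanding sketch of that shape (emit() and the string
"instructions" are stand-ins, not the ARC encoder):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* With buf == NULL only measure; otherwise really write at "off". */
  static size_t emit(unsigned char *buf, size_t off, const char *insn)
  {
      size_t len = strlen(insn);

      if (buf)
          memcpy(buf + off, insn, len);
      return len;
  }

  int main(void)
  {
      const char *prog[] = { "mov", "add", "bz" };
      size_t len = 0, idx = 0;
      unsigned char *mem;

      for (int i = 0; i < 3; i++)    /* "jit_prepare": dry-run pass */
          len += emit(NULL, len, prog[i]);

      mem = malloc(len);
      if (!mem)
          return 1;

      for (int i = 0; i < 3; i++)    /* "jit_compile": real emission */
          idx += emit(mem, idx, prog[i]);

      /* the sanity check mirroring "jit.len == jit.index" */
      printf("%s\n", idx == len ? "lengths match" : "length mismatch");
      free(mem);
      return 0;
  }
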
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
 
 /*
  * This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens, when there are
+ * (re)locations involved that their addresses are not known
+ * during the first run.
  */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {

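The comment's "(re)locations" wording matches the mechanics of the
extra pass: on the first run some target addresses simply do not exist
yet, so the JIT emits a placeholder, and when bpf_int_jit_compile() is
invoked again the now-known address is patched in. A toy illustration
(emit_call() and the addresses are invented):

  #include <stdint.h>
  #include <stdio.h>

  /* Encode a call; use a recognizable placeholder until the target of
   * the relocation is known. */
  static uint32_t emit_call(uint32_t target, int resolved)
  {
      return resolved ? target : 0xdeadbeef;
  }

  int main(void)
  {
      uint32_t insn;

      insn = emit_call(0, 0);               /* first pass: unknown  */
      printf("pass 1: 0x%08x\n", insn);

      insn = emit_call(0x80001000u, 1);     /* extra pass: patched  */
      printf("pass 2: 0x%08x\n", insn);
      return 0;
  }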
|