riscv, bpf: Introduce shift add helper with Zba optimization
The Zba extension is very useful for generating addresses that index into arrays of basic data types. This patch introduces sh2add and sh3add helpers for RV32 and RV64 respectively, to accelerate addressing for arrays of unsigned long data.

Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/bpf/20240524075543.4050464-3-xiao.w.wang@intel.com
commit 96a27ee76f (parent 531876c800)
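For context, sh2add computes rd = rs2 + (rs1 << 2) and sh3add computes rd = rs2 + (rs1 << 3), which is exactly the arithmetic needed to form &arr[idx] for an array of unsigned long elements (4 bytes on RV32, 8 bytes on RV64). Below is a minimal user-space sketch of that address computation; the array and index names are illustrative only and not part of the patch.

#include <assert.h>
#include <stdint.h>

int main(void)
{
        unsigned long arr[8] = { 0 };
        uintptr_t base = (uintptr_t)arr;
        uintptr_t idx = 3;

        /* shift by 2 on RV32 (sizeof(unsigned long) == 4),
         * shift by 3 on RV64 (sizeof(unsigned long) == 8)
         */
        unsigned int shift = sizeof(unsigned long) == 8 ? 3 : 2;

        /* what sh2add/sh3add (or the slli + add fallback) computes */
        uintptr_t addr = base + (idx << shift);

        assert(addr == (uintptr_t)&arr[idx]);
        return 0;
}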
@@ -742,6 +742,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
         return rv_css_insn(0x6, imm, rs2, 0x2);
 }
 
+/* RVZBA instructions. */
+static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2)
+{
+        return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33);
+}
+
+static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2)
+{
+        return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33);
+}
+
 /* RVZBB instructions. */
 static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
 {
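Both encoders above produce standard R-type instructions: SH2ADD and SH3ADD share funct7 0x10 and opcode 0x33 (OP), differing only in funct3 (0x4 vs 0x6). The following standalone sketch packs the same fields, assuming the usual R-type bit layout behind rv_r_insn() (funct7, rs2, rs1, funct3, rd, opcode); the helper name and register numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

/* user-space stand-in for the JIT's rv_r_insn() field packing (assumed layout) */
static uint32_t r_insn(uint8_t funct7, uint8_t rs2, uint8_t rs1,
                       uint8_t funct3, uint8_t rd, uint8_t opcode)
{
        return ((uint32_t)funct7 << 25) | ((uint32_t)rs2 << 20) |
               ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
               ((uint32_t)rd << 7) | opcode;
}

int main(void)
{
        /* sh3add t1, t1, t2: t1 = t2 + (t1 << 3); t1 = x6, t2 = x7 */
        printf("sh3add t1, t1, t2 = 0x%08x\n",
               r_insn(0x10, 7, 6, 0x6, 6, 0x33));
        return 0;
}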
@@ -1095,6 +1106,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
         emit(rv_sw(rs1, off, rs2), ctx);
 }
 
+static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+        if (rvzba_enabled()) {
+                emit(rvzba_sh2add(rd, rs1, rs2), ctx);
+                return;
+        }
+
+        emit_slli(rd, rs1, 2, ctx);
+        emit_add(rd, rd, rs2, ctx);
+}
+
+static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+        if (rvzba_enabled()) {
+                emit(rvzba_sh3add(rd, rs1, rs2), ctx);
+                return;
+        }
+
+        emit_slli(rd, rs1, 3, ctx);
+        emit_add(rd, rd, rs2, ctx);
+}
+
 /* RV64-only helper functions. */
 #if __riscv_xlen == 64
 
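The emit helpers above select a single Zba instruction when the extension is available and otherwise fall back to an equivalent slli + add pair, so call sites stay the same either way. A rough user-space sketch of that dispatch follows, with printf() standing in for the kernel's emit()/rvzba_enabled() machinery; all names here are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool zba_available;      /* stand-in for rvzba_enabled() */

/* mirrors emit_sh3add(): one instruction with Zba, two without */
static void sketch_sh3add(const char *rd, const char *rs1, const char *rs2)
{
        if (zba_available) {
                printf("sh3add %s, %s, %s\n", rd, rs1, rs2);
                return;
        }

        printf("slli %s, %s, 3\n", rd, rs1);
        printf("add %s, %s, %s\n", rd, rd, rs2);
}

int main(void)
{
        zba_available = false;
        sketch_sh3add("t2", "a2", "a1");        /* fallback: slli + add */

        zba_available = true;
        sketch_sh3add("t2", "a2", "a1");        /* Zba: single sh3add */
        return 0;
}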
@@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
          * if (!prog)
          *        goto out;
          */
-        emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
-        emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+        emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx);
         off = offsetof(struct bpf_array, ptrs);
         if (is_12b_check(off, insn))
                 return -1;
@@ -380,8 +380,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
          * if (!prog)
          *        goto out;
          */
-        emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
-        emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
+        emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx);
         off = offsetof(struct bpf_array, ptrs);
         if (is_12b_check(off, insn))
                 return -1;
@@ -1099,12 +1098,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                 /* Load current CPU number in T1 */
                 emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
                         RV_REG_TP, ctx);
-                /* << 3 because offsets are 8 bytes */
-                emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
                 /* Load address of __per_cpu_offset array in T2 */
                 emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
-                /* Add offset of current CPU to __per_cpu_offset */
-                emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+                /* Get address of __per_cpu_offset[cpu] in T1 */
+                emit_sh3add(RV_REG_T1, RV_REG_T1, RV_REG_T2, ctx);
                 /* Load __per_cpu_offset[cpu] in T1 */
                 emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
                 /* Add the offset to Rd */
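In the last hunk, the CPU number scaled by 8 and added to the base of __per_cpu_offset is exactly &__per_cpu_offset[cpu], so the slli + add pair collapses into a single sh3add. A small sketch of that computation, using a local array as a stand-in for the kernel's __per_cpu_offset:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* stand-in for __per_cpu_offset[]; entries are 8 bytes each */
        uint64_t per_cpu_offset[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
        uintptr_t cpu = 2;

        /* sh3add T1, T1(=cpu), T2(=base): T1 = base + (cpu << 3) */
        uintptr_t t1 = (uintptr_t)per_cpu_offset + (cpu << 3);

        assert(*(uint64_t *)t1 == per_cpu_offset[cpu]);
        return 0;
}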