
bpf: Add support for kprobe session context

Adding a struct bpf_session_run_ctx object to hold session-related data,
which at the moment is just the is_return bool; a data pointer will be
added in the following changes.

Placing the bpf_session_run_ctx layer in between bpf_run_ctx and
bpf_kprobe_multi_run_ctx, so the session data can be retrieved regardless
of whether the program runs through a kprobe_multi or a uprobe_multi link
(uprobe_multi support is coming in the future). This way both kprobe_multi
and uprobe_multi can use the same kfuncs to access the session data, as
sketched below.
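
To illustrate the layering, here is a standalone sketch that compiles in
userspace (not part of the patch; the struct names mirror the kernel ones,
while the uprobe_multi struct and the helper names are hypothetical):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct bpf_run_ctx { int dummy; };

  struct bpf_session_run_ctx {
          struct bpf_run_ctx run_ctx;
          bool is_return;
  };

  /* link specific contexts embed the session layer as their first member */
  struct bpf_kprobe_multi_run_ctx {
          struct bpf_session_run_ctx session_ctx;
          unsigned long entry_ip;
  };

  /* hypothetical: a future uprobe_multi context would do the same */
  struct bpf_uprobe_multi_run_ctx {
          struct bpf_session_run_ctx session_ctx;
          unsigned long entry_ip;
  };

  /* a session kfunc only needs the common session layer ... */
  static bool session_is_return(struct bpf_run_ctx *ctx)
  {
          return container_of(ctx, struct bpf_session_run_ctx, run_ctx)->is_return;
  }

  int main(void)
  {
          struct bpf_kprobe_multi_run_ctx run_ctx = {
                  .session_ctx = { .is_return = true },
                  .entry_ip = 0x1234,
          };

          /* ... while kprobe_multi code can still recover its own context */
          struct bpf_kprobe_multi_run_ctx *k =
                  container_of(&run_ctx.session_ctx.run_ctx,
                               struct bpf_kprobe_multi_run_ctx, session_ctx.run_ctx);

          printf("is_return=%d entry_ip=%#lx\n",
                 session_is_return(&run_ctx.session_ctx.run_ctx), k->entry_ip);
          return 0;
  }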

Adding a bpf_session_is_return kfunc that returns true if the bpf program
is executed from the exit probe of a kprobe multi link attached in wrapper
(session) mode, and false otherwise.
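
For illustration, a kprobe session bpf program could then consume the kfunc
along these lines (a sketch only; it assumes the kprobe.session libbpf
attach support added elsewhere in this series, and do_nanosleep is just an
example attach target):

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>

  char LICENSE[] SEC("license") = "GPL";

  /* kfunc added by this patch, resolved through BTF at load time */
  extern bool bpf_session_is_return(void) __weak __ksym;

  SEC("kprobe.session/do_nanosleep")
  int handle_nanosleep(struct pt_regs *ctx)
  {
          if (bpf_session_is_return())
                  bpf_printk("do_nanosleep: return probe");
          else
                  bpf_printk("do_nanosleep: entry probe");
          return 0;
  }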

Adding a new BTF kfunc hook (BTF_KFUNC_HOOK_KPROBE) for the kprobe
program type, so kfuncs can be registered for it.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240430112830.1184228-3-jolsa@kernel.org
Author: Jiri Olsa <jolsa@kernel.org>, 2024-04-30 13:28:25 +02:00, committed by Andrii Nakryiko
parent 535a3692ba
commit adf46d88ae
2 changed files with 63 additions and 7 deletions

kernel/bpf/btf.c

@@ -218,6 +218,7 @@ enum btf_kfunc_hook {
         BTF_KFUNC_HOOK_SOCKET_FILTER,
         BTF_KFUNC_HOOK_LWT,
         BTF_KFUNC_HOOK_NETFILTER,
+        BTF_KFUNC_HOOK_KPROBE,
         BTF_KFUNC_HOOK_MAX,
 };
@@ -8157,6 +8158,8 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
                 return BTF_KFUNC_HOOK_LWT;
         case BPF_PROG_TYPE_NETFILTER:
                 return BTF_KFUNC_HOOK_NETFILTER;
+        case BPF_PROG_TYPE_KPROBE:
+                return BTF_KFUNC_HOOK_KPROBE;
         default:
                 return BTF_KFUNC_HOOK_MAX;
         }

kernel/trace/bpf_trace.c

@@ -2596,6 +2596,11 @@ static int __init bpf_event_init(void)
 fs_initcall(bpf_event_init);
 #endif /* CONFIG_MODULES */
 
+struct bpf_session_run_ctx {
+        struct bpf_run_ctx run_ctx;
+        bool is_return;
+};
+
 #ifdef CONFIG_FPROBE
 struct bpf_kprobe_multi_link {
         struct bpf_link link;
@@ -2609,7 +2614,7 @@ struct bpf_kprobe_multi_link {
 };
 
 struct bpf_kprobe_multi_run_ctx {
-        struct bpf_run_ctx run_ctx;
+        struct bpf_session_run_ctx session_ctx;
         struct bpf_kprobe_multi_link *link;
         unsigned long entry_ip;
 };
@@ -2788,7 +2793,8 @@ static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
         if (WARN_ON_ONCE(!ctx))
                 return 0;
-        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
+        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
+                               session_ctx.run_ctx);
         link = run_ctx->link;
         if (!link->cookies)
                 return 0;
@@ -2805,15 +2811,20 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 {
         struct bpf_kprobe_multi_run_ctx *run_ctx;
 
-        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
+        run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx,
+                               session_ctx.run_ctx);
         return run_ctx->entry_ip;
 }
 
 static int
 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
-                           unsigned long entry_ip, struct pt_regs *regs)
+                           unsigned long entry_ip, struct pt_regs *regs,
+                           bool is_return)
 {
         struct bpf_kprobe_multi_run_ctx run_ctx = {
+                .session_ctx = {
+                        .is_return = is_return,
+                },
                 .link = link,
                 .entry_ip = entry_ip,
         };
@@ -2828,7 +2839,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
 
         migrate_disable();
         rcu_read_lock();
-        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+        old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
         err = bpf_prog_run(link->link.prog, regs);
         bpf_reset_run_ctx(old_run_ctx);
         rcu_read_unlock();
@@ -2848,7 +2859,7 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
         int err;
 
         link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-        err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+        err = kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, false);
         return is_kprobe_session(link->link.prog) ? err : 0;
 }
@@ -2860,7 +2871,7 @@ kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
         struct bpf_kprobe_multi_link *link;
 
         link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-        kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
+        kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs, true);
 }
 
 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
@@ -3503,3 +3514,45 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
         return 0;
 }
 #endif /* CONFIG_UPROBES */
+
+#ifdef CONFIG_FPROBE
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc bool bpf_session_is_return(void)
+{
+        struct bpf_session_run_ctx *session_ctx;
+
+        session_ctx = container_of(current->bpf_ctx, struct bpf_session_run_ctx, run_ctx);
+        return session_ctx->is_return;
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(kprobe_multi_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_session_is_return)
+BTF_KFUNCS_END(kprobe_multi_kfunc_set_ids)
+
+static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+        if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
+                return 0;
+
+        if (!is_kprobe_session(prog))
+                return -EACCES;
+
+        return 0;
+}
+
+static const struct btf_kfunc_id_set bpf_kprobe_multi_kfunc_set = {
+        .owner = THIS_MODULE,
+        .set = &kprobe_multi_kfunc_set_ids,
+        .filter = bpf_kprobe_multi_filter,
+};
+
+static int __init bpf_kprobe_multi_kfuncs_init(void)
+{
+        return register_btf_kfunc_id_set(BPF_PROG_TYPE_KPROBE, &bpf_kprobe_multi_kfunc_set);
+}
+
+late_initcall(bpf_kprobe_multi_kfuncs_init);
+#endif