From df216f57522c2ebe6e301f873195420de7992aa8 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Wed, 12 Jun 2024 08:44:27 +0900 Subject: [PATCH 01/19] fprobe: add missing MODULE_DESCRIPTION() macro make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in samples/fprobe/fprobe_example.o Add the missing invocation of the MODULE_DESCRIPTION() macro. Link: https://lore.kernel.org/all/20240601-md-samples-fprobe-v1-1-5d256a956612@quicinc.com/ Signed-off-by: Jeff Johnson Signed-off-by: Masami Hiramatsu (Google) --- samples/fprobe/fprobe_example.c | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c index 64e715e7ed11..0a50b05add96 100644 --- a/samples/fprobe/fprobe_example.c +++ b/samples/fprobe/fprobe_example.c @@ -150,4 +150,5 @@ static void __exit fprobe_exit(void) module_init(fprobe_init) module_exit(fprobe_exit) +MODULE_DESCRIPTION("sample kernel module showing the use of fprobe"); MODULE_LICENSE("GPL"); From 1b3c86eeea7594eeeb49b8d1c1db0a40f0ce7920 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Wed, 12 Jun 2024 08:44:27 +0900 Subject: [PATCH 02/19] samples: kprobes: add missing MODULE_DESCRIPTION() macros make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in samples/kprobes/kprobe_example.o WARNING: modpost: missing MODULE_DESCRIPTION() in samples/kprobes/kretprobe_example.o Add the missing invocations of the MODULE_DESCRIPTION() macro. Link: https://lore.kernel.org/all/20240601-md-samples-kprobes-v1-1-b6a772353893@quicinc.com/ Signed-off-by: Jeff Johnson Signed-off-by: Masami Hiramatsu (Google) --- samples/kprobes/kprobe_example.c | 1 + samples/kprobes/kretprobe_example.c | 1 + 2 files changed, 2 insertions(+) diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index ef44c614c6d9..53ec6c8b8c40 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -125,4 +125,5 @@ static void __exit kprobe_exit(void) module_init(kprobe_init) module_exit(kprobe_exit) +MODULE_DESCRIPTION("sample kernel module showing the use of kprobes"); MODULE_LICENSE("GPL"); diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index ed79fd3d48fb..65d6dcafd742 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -104,4 +104,5 @@ static void __exit kretprobe_exit(void) module_init(kretprobe_init) module_exit(kretprobe_exit) +MODULE_DESCRIPTION("sample kernel module showing the use of return probes"); MODULE_LICENSE("GPL"); From 1713b63a07a28a475de94664f783b4cfd2e4fa90 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:27 +0900 Subject: [PATCH 03/19] x86/shstk: Make return uprobe work with shadow stack Currently the application with enabled shadow stack will crash if it sets up return uprobe. The reason is the uretprobe kernel code changes the user space task's stack, but does not update shadow stack accordingly. Adding new functions to update values on shadow stack and using them in uprobe code to keep shadow stack in sync with uretprobe changes to user stack. 
Link: https://lore.kernel.org/all/20240611112158.40795-2-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Acked-by: Rick Edgecombe Reviewed-by: Oleg Nesterov Fixes: 488af8ea7131 ("x86/shstk: Wire in shadow stack interface") Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- arch/x86/include/asm/shstk.h | 2 ++ arch/x86/kernel/shstk.c | 11 +++++++++++ arch/x86/kernel/uprobes.c | 7 ++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h index 42fee8959df7..896909f306e3 100644 --- a/arch/x86/include/asm/shstk.h +++ b/arch/x86/include/asm/shstk.h @@ -21,6 +21,7 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clon void shstk_free(struct task_struct *p); int setup_signal_shadow_stack(struct ksignal *ksig); int restore_signal_shadow_stack(void); +int shstk_update_last_frame(unsigned long val); #else static inline long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) { return -EINVAL; } @@ -31,6 +32,7 @@ static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p, static inline void shstk_free(struct task_struct *p) {} static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; } static inline int restore_signal_shadow_stack(void) { return 0; } +static inline int shstk_update_last_frame(unsigned long val) { return 0; } #endif /* CONFIG_X86_USER_SHADOW_STACK */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c index 6f1e9883f074..9797d4cdb78a 100644 --- a/arch/x86/kernel/shstk.c +++ b/arch/x86/kernel/shstk.c @@ -577,3 +577,14 @@ long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) return wrss_control(true); return -EINVAL; } + +int shstk_update_last_frame(unsigned long val) +{ + unsigned long ssp; + + if (!features_enabled(ARCH_SHSTK_SHSTK)) + return 0; + + ssp = get_user_shstk_addr(); + return write_user_shstk_64((u64 __user *)ssp, (u64)val); +} diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 6c07f6daaa22..6402fb3089d2 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -1076,8 +1076,13 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs return orig_ret_vaddr; nleft = copy_to_user((void __user *)regs->sp, &trampoline_vaddr, rasize); - if (likely(!nleft)) + if (likely(!nleft)) { + if (shstk_update_last_frame(trampoline_vaddr)) { + force_sig(SIGSEGV); + return -1; + } return orig_ret_vaddr; + } if (nleft != rasize) { pr_err("return address clobbered: pid=%d, %%sp=%#lx, %%ip=%#lx\n", From 190fec72df4a5d4d98b1e783c333f471e5e5f344 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:27 +0900 Subject: [PATCH 04/19] uprobe: Wire up uretprobe system call Wiring up uretprobe system call, which comes in following changes. We need to do the wiring before, because the uretprobe implementation needs the syscall number. Note at the moment uretprobe syscall is supported only for native 64-bit process. 
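For reference, a minimal userspace sketch of issuing the new syscall
number; the fallback define is only needed where libc headers do not yet
carry it, and calling the syscall from outside the kernel-provided
'[uprobes]' trampoline is expected to result in SIGILL (enforced by a
later patch in this series):

  #include <unistd.h>
  #include <sys/syscall.h>

  #ifndef __NR_uretprobe
  #define __NR_uretprobe 463
  #endif

  int main(void)
  {
          /* sys_uretprobe() takes no arguments; outside of the
           * uretprobe trampoline it is expected to raise SIGILL
           */
          return syscall(__NR_uretprobe);
  }
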
Link: https://lore.kernel.org/all/20240611112158.40795-3-jolsa@kernel.org/ Reviewed-by: Oleg Nesterov Reviewed-by: Masami Hiramatsu (Google) Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- arch/x86/entry/syscalls/syscall_64.tbl | 1 + include/linux/syscalls.h | 2 ++ include/uapi/asm-generic/unistd.h | 5 ++++- kernel/sys_ni.c | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index a396f6e6ab5b..6452c2ec469a 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -384,6 +384,7 @@ 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules 462 common mseal sys_mseal +463 64 uretprobe sys_uretprobe # # Due to a historical design error, certain syscalls are numbered differently diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 9104952d323d..494f5e0f61f7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -973,6 +973,8 @@ asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags); /* x86 */ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); +asmlinkage long sys_uretprobe(void); + /* pciconfig: alpha, arm, arm64, ia64, sparc */ asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn, unsigned long off, unsigned long len, diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index d983c48a3b6a..2378f88d5ad4 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -845,8 +845,11 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) #define __NR_mseal 462 __SYSCALL(__NR_mseal, sys_mseal) +#define __NR_uretprobe 463 +__SYSCALL(__NR_uretprobe, sys_uretprobe) + #undef __NR_syscalls -#define __NR_syscalls 463 +#define __NR_syscalls 464 /* * 32 bit systems traditionally used different diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index d7eee421d4bc..5ce9fa0dc195 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -392,3 +392,5 @@ COND_SYSCALL(setuid16); /* restartable sequence */ COND_SYSCALL(rseq); + +COND_SYSCALL(uretprobe); From ff474a78cef5cb5f32be52fe25b78441327a2e7c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:28 +0900 Subject: [PATCH 05/19] uprobe: Add uretprobe syscall to speed up return probe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding uretprobe syscall instead of trap to speed up return probe. At the moment the uretprobe setup/path is: - install entry uprobe - when the uprobe is hit, it overwrites probed function's return address on stack with address of the trampoline that contains breakpoint instruction - the breakpoint trap code handles the uretprobe consumers execution and jumps back to original return address This patch replaces the above trampoline's breakpoint instruction with new ureprobe syscall call. 
This syscall does exactly the same job as the trap, with some extra work:

  - the syscall trampoline must save the original values of the
    rax/r11/rcx registers on the stack - rax is set to the syscall
    number and r11/rcx are changed and used by the syscall instruction

  - the syscall code reads the original values of those registers and
    restores them in the task's pt_regs area

  - only a caller from the trampoline exposed in '[uprobes]' is allowed;
    otherwise the process receives a SIGILL signal

Even with this extra work, using the uretprobe syscall shows a speed
improvement compared to using the standard breakpoint:

On Intel (11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz)

  current:
    uretprobe-nop  : 1.498 ± 0.000M/s
    uretprobe-push : 1.448 ± 0.001M/s
    uretprobe-ret  : 0.816 ± 0.001M/s

  with the fix:
    uretprobe-nop  : 1.969 ± 0.002M/s  < 31% speed up
    uretprobe-push : 1.910 ± 0.000M/s  < 31% speed up
    uretprobe-ret  : 0.934 ± 0.000M/s  < 14% speed up

On AMD (AMD Ryzen 7 5700U)

  current:
    uretprobe-nop  : 0.778 ± 0.001M/s
    uretprobe-push : 0.744 ± 0.001M/s
    uretprobe-ret  : 0.540 ± 0.001M/s

  with the fix:
    uretprobe-nop  : 0.860 ± 0.001M/s  < 10% speed up
    uretprobe-push : 0.818 ± 0.001M/s  < 10% speed up
    uretprobe-ret  : 0.578 ± 0.000M/s  <  7% speed up

The performance test spawns a thread that runs a loop triggering a
uprobe with an attached bpf program that increments the counter printed
in the results above. Whether a uprobe or a uretprobe is created is
determined by which instruction is patched with the breakpoint
instruction; that also matters for uretprobes, because a uprobe is
installed for each uretprobe.

The performance test is part of the bpf selftests:
  tools/testing/selftests/bpf/run_bench_uprobes.sh

Note that at the moment the uretprobe syscall is supported only for
native 64-bit processes; compat processes still use the standard
breakpoint.

Note that when shadow stack is enabled, the uretprobe syscall returns
via iret, which is slower than returning via sysret, but does not cause
a shadow stack violation.
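Condensed, the trampoline sequence described above (added in full in the
arch/x86/kernel/uprobes.c hunk below) is:

  pushq %rax
  pushq %rcx
  pushq %r11
  movq  $__NR_uretprobe, %rax
  syscall
  popq  %r11
  popq  %rcx
  retq    /* the saved %rax slot now holds the final return address */
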
Link: https://lore.kernel.org/all/20240611112158.40795-4-jolsa@kernel.org/ Suggested-by: Andrii Nakryiko Reviewed-by: Oleg Nesterov Reviewed-by: Masami Hiramatsu (Google) Acked-by: Andrii Nakryiko Signed-off-by: Oleg Nesterov Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- arch/x86/include/asm/shstk.h | 2 + arch/x86/kernel/shstk.c | 5 ++ arch/x86/kernel/uprobes.c | 117 +++++++++++++++++++++++++++++++++++ include/linux/uprobes.h | 3 + kernel/events/uprobes.c | 24 ++++--- 5 files changed, 144 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h index 896909f306e3..4cb77e004615 100644 --- a/arch/x86/include/asm/shstk.h +++ b/arch/x86/include/asm/shstk.h @@ -22,6 +22,7 @@ void shstk_free(struct task_struct *p); int setup_signal_shadow_stack(struct ksignal *ksig); int restore_signal_shadow_stack(void); int shstk_update_last_frame(unsigned long val); +bool shstk_is_enabled(void); #else static inline long shstk_prctl(struct task_struct *task, int option, unsigned long arg2) { return -EINVAL; } @@ -33,6 +34,7 @@ static inline void shstk_free(struct task_struct *p) {} static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; } static inline int restore_signal_shadow_stack(void) { return 0; } static inline int shstk_update_last_frame(unsigned long val) { return 0; } +static inline bool shstk_is_enabled(void) { return false; } #endif /* CONFIG_X86_USER_SHADOW_STACK */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c index 9797d4cdb78a..059685612362 100644 --- a/arch/x86/kernel/shstk.c +++ b/arch/x86/kernel/shstk.c @@ -588,3 +588,8 @@ int shstk_update_last_frame(unsigned long val) ssp = get_user_shstk_addr(); return write_user_shstk_64((u64 __user *)ssp, (u64)val); } + +bool shstk_is_enabled(void) +{ + return features_enabled(ARCH_SHSTK_SHSTK); +} diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 6402fb3089d2..5a952c5ea66b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -308,6 +309,122 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool } #ifdef CONFIG_X86_64 + +asm ( + ".pushsection .rodata\n" + ".global uretprobe_trampoline_entry\n" + "uretprobe_trampoline_entry:\n" + "pushq %rax\n" + "pushq %rcx\n" + "pushq %r11\n" + "movq $" __stringify(__NR_uretprobe) ", %rax\n" + "syscall\n" + ".global uretprobe_syscall_check\n" + "uretprobe_syscall_check:\n" + "popq %r11\n" + "popq %rcx\n" + + /* The uretprobe syscall replaces stored %rax value with final + * return address, so we don't restore %rax in here and just + * call ret. + */ + "retq\n" + ".global uretprobe_trampoline_end\n" + "uretprobe_trampoline_end:\n" + ".popsection\n" +); + +extern u8 uretprobe_trampoline_entry[]; +extern u8 uretprobe_trampoline_end[]; +extern u8 uretprobe_syscall_check[]; + +void *arch_uprobe_trampoline(unsigned long *psize) +{ + static uprobe_opcode_t insn = UPROBE_SWBP_INSN; + struct pt_regs *regs = task_pt_regs(current); + + /* + * At the moment the uretprobe syscall trampoline is supported + * only for native 64-bit process, the compat process still uses + * standard breakpoint. 
+ */ + if (user_64bit_mode(regs)) { + *psize = uretprobe_trampoline_end - uretprobe_trampoline_entry; + return uretprobe_trampoline_entry; + } + + *psize = UPROBE_SWBP_INSN_SIZE; + return &insn; +} + +static unsigned long trampoline_check_ip(void) +{ + unsigned long tramp = uprobe_get_trampoline_vaddr(); + + return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry); +} + +SYSCALL_DEFINE0(uretprobe) +{ + struct pt_regs *regs = task_pt_regs(current); + unsigned long err, ip, sp, r11_cx_ax[3]; + + if (regs->ip != trampoline_check_ip()) + goto sigill; + + err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax)); + if (err) + goto sigill; + + /* expose the "right" values of r11/cx/ax/sp to uprobe_consumer/s */ + regs->r11 = r11_cx_ax[0]; + regs->cx = r11_cx_ax[1]; + regs->ax = r11_cx_ax[2]; + regs->sp += sizeof(r11_cx_ax); + regs->orig_ax = -1; + + ip = regs->ip; + sp = regs->sp; + + uprobe_handle_trampoline(regs); + + /* + * Some of the uprobe consumers has changed sp, we can do nothing, + * just return via iret. + * .. or shadow stack is enabled, in which case we need to skip + * return through the user space stack address. + */ + if (regs->sp != sp || shstk_is_enabled()) + return regs->ax; + regs->sp -= sizeof(r11_cx_ax); + + /* for the case uprobe_consumer has changed r11/cx */ + r11_cx_ax[0] = regs->r11; + r11_cx_ax[1] = regs->cx; + + /* + * ax register is passed through as return value, so we can use + * its space on stack for ip value and jump to it through the + * trampoline's ret instruction + */ + r11_cx_ax[2] = regs->ip; + regs->ip = ip; + + err = copy_to_user((void __user *)regs->sp, r11_cx_ax, sizeof(r11_cx_ax)); + if (err) + goto sigill; + + /* ensure sysret, see do_syscall_64() */ + regs->r11 = regs->flags; + regs->cx = regs->ip; + + return regs->ax; + +sigill: + force_sig(SIGILL); + return -1; +} + /* * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. 
Otherwise, rewrite the instruction so that it accesses diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index f46e0ca0169c..b503fafb7fb3 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -138,6 +138,9 @@ extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check c extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len); +extern void uprobe_handle_trampoline(struct pt_regs *regs); +extern void *arch_uprobe_trampoline(unsigned long *psize); +extern unsigned long uprobe_get_trampoline_vaddr(void); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2c83ba776fc7..2816e65729ac 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1474,11 +1474,20 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area) return ret; } +void * __weak arch_uprobe_trampoline(unsigned long *psize) +{ + static uprobe_opcode_t insn = UPROBE_SWBP_INSN; + + *psize = UPROBE_SWBP_INSN_SIZE; + return &insn; +} + static struct xol_area *__create_xol_area(unsigned long vaddr) { struct mm_struct *mm = current->mm; - uprobe_opcode_t insn = UPROBE_SWBP_INSN; + unsigned long insns_size; struct xol_area *area; + void *insns; area = kmalloc(sizeof(*area), GFP_KERNEL); if (unlikely(!area)) @@ -1502,7 +1511,8 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) /* Reserve the 1st slot for get_trampoline_vaddr() */ set_bit(0, area->bitmap); atomic_set(&area->slot_count, 1); - arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); + insns = arch_uprobe_trampoline(&insns_size); + arch_uprobe_copy_ixol(area->pages[0], 0, insns, insns_size); if (!xol_add_vma(mm, area)) return area; @@ -1827,7 +1837,7 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags) * * Returns -1 in case the xol_area is not allocated. */ -static unsigned long get_trampoline_vaddr(void) +unsigned long uprobe_get_trampoline_vaddr(void) { struct xol_area *area; unsigned long trampoline_vaddr = -1; @@ -1878,7 +1888,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) if (!ri) return; - trampoline_vaddr = get_trampoline_vaddr(); + trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) goto fail; @@ -2123,7 +2133,7 @@ static struct return_instance *find_next_ret_chain(struct return_instance *ri) return ri; } -static void handle_trampoline(struct pt_regs *regs) +void uprobe_handle_trampoline(struct pt_regs *regs) { struct uprobe_task *utask; struct return_instance *ri, *next; @@ -2187,8 +2197,8 @@ static void handle_swbp(struct pt_regs *regs) int is_swbp; bp_vaddr = uprobe_get_swbp_addr(regs); - if (bp_vaddr == get_trampoline_vaddr()) - return handle_trampoline(regs); + if (bp_vaddr == uprobe_get_trampoline_vaddr()) + return uprobe_handle_trampoline(regs); uprobe = find_active_uprobe(bp_vaddr, &is_swbp); if (!uprobe) { From 29edd8b003db897d81d82d950785327f164650d3 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:28 +0900 Subject: [PATCH 06/19] selftests/x86: Add return uprobe shadow stack test Adding return uprobe test for shadow stack and making sure it's working properly. Borrowed some of the code from bpf selftests. 
Link: https://lore.kernel.org/all/20240611112158.40795-5-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- .../testing/selftests/x86/test_shadow_stack.c | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/tools/testing/selftests/x86/test_shadow_stack.c b/tools/testing/selftests/x86/test_shadow_stack.c index ee909a7927f9..21af54d5f4ea 100644 --- a/tools/testing/selftests/x86/test_shadow_stack.c +++ b/tools/testing/selftests/x86/test_shadow_stack.c @@ -34,6 +34,7 @@ #include #include #include +#include /* * Define the ABI defines if needed, so people can run the tests @@ -734,6 +735,144 @@ int test_32bit(void) return !segv_triggered; } +static int parse_uint_from_file(const char *file, const char *fmt) +{ + int err, ret; + FILE *f; + + f = fopen(file, "re"); + if (!f) { + err = -errno; + printf("failed to open '%s': %d\n", file, err); + return err; + } + err = fscanf(f, fmt, &ret); + if (err != 1) { + err = err == EOF ? -EIO : -errno; + printf("failed to parse '%s': %d\n", file, err); + fclose(f); + return err; + } + fclose(f); + return ret; +} + +static int determine_uprobe_perf_type(void) +{ + const char *file = "/sys/bus/event_source/devices/uprobe/type"; + + return parse_uint_from_file(file, "%d\n"); +} + +static int determine_uprobe_retprobe_bit(void) +{ + const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe"; + + return parse_uint_from_file(file, "config:%d\n"); +} + +static ssize_t get_uprobe_offset(const void *addr) +{ + size_t start, end, base; + char buf[256]; + bool found = false; + FILE *f; + + f = fopen("/proc/self/maps", "r"); + if (!f) + return -errno; + + while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) { + if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) { + found = true; + break; + } + } + + fclose(f); + + if (!found) + return -ESRCH; + + return (uintptr_t)addr - start + base; +} + +static __attribute__((noinline)) void uretprobe_trigger(void) +{ + asm volatile (""); +} + +/* + * This test setups return uprobe, which is sensitive to shadow stack + * (crashes without extra fix). After executing the uretprobe we fail + * the test if we receive SIGSEGV, no crash means we're good. + * + * Helper functions above borrowed from bpf selftests. + */ +static int test_uretprobe(void) +{ + const size_t attr_sz = sizeof(struct perf_event_attr); + const char *file = "/proc/self/exe"; + int bit, fd = 0, type, err = 1; + struct perf_event_attr attr; + struct sigaction sa = {}; + ssize_t offset; + + type = determine_uprobe_perf_type(); + if (type < 0) { + if (type == -ENOENT) + printf("[SKIP]\tUretprobe test, uprobes are not available\n"); + return 0; + } + + offset = get_uprobe_offset(uretprobe_trigger); + if (offset < 0) + return 1; + + bit = determine_uprobe_retprobe_bit(); + if (bit < 0) + return 1; + + sa.sa_sigaction = segv_gp_handler; + sa.sa_flags = SA_SIGINFO; + if (sigaction(SIGSEGV, &sa, NULL)) + return 1; + + /* Setup return uprobe through perf event interface. 
*/ + memset(&attr, 0, attr_sz); + attr.size = attr_sz; + attr.type = type; + attr.config = 1 << bit; + attr.config1 = (__u64) (unsigned long) file; + attr.config2 = offset; + + fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */, -1 /* cpu */, + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); + if (fd < 0) + goto out; + + if (sigsetjmp(jmp_buffer, 1)) + goto out; + + ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK); + + /* + * This either segfaults and goes through sigsetjmp above + * or succeeds and we're good. + */ + uretprobe_trigger(); + + printf("[OK]\tUretprobe test\n"); + err = 0; + +out: + ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK); + signal(SIGSEGV, SIG_DFL); + if (fd) + close(fd); + return err; +} + void segv_handler_ptrace(int signum, siginfo_t *si, void *uc) { /* The SSP adjustment caused a segfault. */ @@ -926,6 +1065,12 @@ int main(int argc, char *argv[]) goto out; } + if (test_uretprobe()) { + ret = 1; + printf("[FAIL]\turetprobe test\n"); + goto out; + } + return ret; out: From 3e8e25761a40194887336650673587191564e12c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:28 +0900 Subject: [PATCH 07/19] selftests/bpf: Add uretprobe syscall test for regs integrity Add uretprobe syscall test that compares register values before and after the uretprobe is hit. It also compares the register values seen from attached bpf program. Link: https://lore.kernel.org/all/20240611112158.40795-6-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Reviewed-by: Masami Hiramatsu (Google) Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- tools/include/linux/compiler.h | 4 + .../selftests/bpf/prog_tests/uprobe_syscall.c | 163 ++++++++++++++++++ .../selftests/bpf/progs/uprobe_syscall.c | 15 ++ 3 files changed, 182 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c create mode 100644 tools/testing/selftests/bpf/progs/uprobe_syscall.c diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 8a63a9913495..6f7f22ac9da5 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -62,6 +62,10 @@ #define __nocf_check __attribute__((nocf_check)) #endif +#ifndef __naked +#define __naked __attribute__((__naked__)) +#endif + /* Are two types/vars the same type (ignoring qualifiers)? 
*/ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c new file mode 100644 index 000000000000..311ac19d8992 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#ifdef __x86_64__ + +#include +#include +#include +#include "uprobe_syscall.skel.h" + +__naked unsigned long uretprobe_regs_trigger(void) +{ + asm volatile ( + "movq $0xdeadbeef, %rax\n" + "ret\n" + ); +} + +__naked void uretprobe_regs(struct pt_regs *before, struct pt_regs *after) +{ + asm volatile ( + "movq %r15, 0(%rdi)\n" + "movq %r14, 8(%rdi)\n" + "movq %r13, 16(%rdi)\n" + "movq %r12, 24(%rdi)\n" + "movq %rbp, 32(%rdi)\n" + "movq %rbx, 40(%rdi)\n" + "movq %r11, 48(%rdi)\n" + "movq %r10, 56(%rdi)\n" + "movq %r9, 64(%rdi)\n" + "movq %r8, 72(%rdi)\n" + "movq %rax, 80(%rdi)\n" + "movq %rcx, 88(%rdi)\n" + "movq %rdx, 96(%rdi)\n" + "movq %rsi, 104(%rdi)\n" + "movq %rdi, 112(%rdi)\n" + "movq $0, 120(%rdi)\n" /* orig_rax */ + "movq $0, 128(%rdi)\n" /* rip */ + "movq $0, 136(%rdi)\n" /* cs */ + "pushf\n" + "pop %rax\n" + "movq %rax, 144(%rdi)\n" /* eflags */ + "movq %rsp, 152(%rdi)\n" /* rsp */ + "movq $0, 160(%rdi)\n" /* ss */ + + /* save 2nd argument */ + "pushq %rsi\n" + "call uretprobe_regs_trigger\n" + + /* save return value and load 2nd argument pointer to rax */ + "pushq %rax\n" + "movq 8(%rsp), %rax\n" + + "movq %r15, 0(%rax)\n" + "movq %r14, 8(%rax)\n" + "movq %r13, 16(%rax)\n" + "movq %r12, 24(%rax)\n" + "movq %rbp, 32(%rax)\n" + "movq %rbx, 40(%rax)\n" + "movq %r11, 48(%rax)\n" + "movq %r10, 56(%rax)\n" + "movq %r9, 64(%rax)\n" + "movq %r8, 72(%rax)\n" + "movq %rcx, 88(%rax)\n" + "movq %rdx, 96(%rax)\n" + "movq %rsi, 104(%rax)\n" + "movq %rdi, 112(%rax)\n" + "movq $0, 120(%rax)\n" /* orig_rax */ + "movq $0, 128(%rax)\n" /* rip */ + "movq $0, 136(%rax)\n" /* cs */ + + /* restore return value and 2nd argument */ + "pop %rax\n" + "pop %rsi\n" + + "movq %rax, 80(%rsi)\n" + + "pushf\n" + "pop %rax\n" + + "movq %rax, 144(%rsi)\n" /* eflags */ + "movq %rsp, 152(%rsi)\n" /* rsp */ + "movq $0, 160(%rsi)\n" /* ss */ + "ret\n" +); +} + +static void test_uretprobe_regs_equal(void) +{ + struct uprobe_syscall *skel = NULL; + struct pt_regs before = {}, after = {}; + unsigned long *pb = (unsigned long *) &before; + unsigned long *pa = (unsigned long *) &after; + unsigned long *pp; + unsigned int i, cnt; + int err; + + skel = uprobe_syscall__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_syscall__open_and_load")) + goto cleanup; + + err = uprobe_syscall__attach(skel); + if (!ASSERT_OK(err, "uprobe_syscall__attach")) + goto cleanup; + + uretprobe_regs(&before, &after); + + pp = (unsigned long *) &skel->bss->regs; + cnt = sizeof(before)/sizeof(*pb); + + for (i = 0; i < cnt; i++) { + unsigned int offset = i * sizeof(unsigned long); + + /* + * Check register before and after uretprobe_regs_trigger call + * that triggers the uretprobe. 
+ */ + switch (offset) { + case offsetof(struct pt_regs, rax): + ASSERT_EQ(pa[i], 0xdeadbeef, "return value"); + break; + default: + if (!ASSERT_EQ(pb[i], pa[i], "register before-after value check")) + fprintf(stdout, "failed register offset %u\n", offset); + } + + /* + * Check register seen from bpf program and register after + * uretprobe_regs_trigger call + */ + switch (offset) { + /* + * These values will be different (not set in uretprobe_regs), + * we don't care. + */ + case offsetof(struct pt_regs, orig_rax): + case offsetof(struct pt_regs, rip): + case offsetof(struct pt_regs, cs): + case offsetof(struct pt_regs, rsp): + case offsetof(struct pt_regs, ss): + break; + default: + if (!ASSERT_EQ(pp[i], pa[i], "register prog-after value check")) + fprintf(stdout, "failed register offset %u\n", offset); + } + } + +cleanup: + uprobe_syscall__destroy(skel); +} +#else +static void test_uretprobe_regs_equal(void) +{ + test__skip(); +} +#endif + +void test_uprobe_syscall(void) +{ + if (test__start_subtest("uretprobe_regs_equal")) + test_uretprobe_regs_equal(); +} diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall.c b/tools/testing/selftests/bpf/progs/uprobe_syscall.c new file mode 100644 index 000000000000..8a4fa6c7ef59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_syscall.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include +#include + +struct pt_regs regs; + +char _license[] SEC("license") = "GPL"; + +SEC("uretprobe//proc/self/exe:uretprobe_regs_trigger") +int uretprobe(struct pt_regs *ctx) +{ + __builtin_memcpy(®s, ctx, sizeof(regs)); + return 0; +} From f42a58ffb8bb54e66bf9668a6be91477828c0c1b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:28 +0900 Subject: [PATCH 08/19] selftests/bpf: Add uretprobe syscall test for regs changes Adding test that creates uprobe consumer on uretprobe which changes some of the registers. Making sure the changed registers are propagated to the user space when the ureptobe syscall trampoline is used on x86_64. To be able to do this, adding support to bpf_testmod to create uprobe via new attribute file: /sys/kernel/bpf_testmod_uprobe This file is expecting file offset and creates related uprobe on current process exe file and removes existing uprobe if offset is 0. The can be only single uprobe at any time. The uprobe has specific consumer that changes registers used in ureprobe syscall trampoline and which are later checked in the test. 
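A rough sketch of driving the sysfs file from a test program (error
handling omitted; the offset is assumed to be the uprobe offset of the
traced function within the test binary):

  char buf[32];
  int n = snprintf(buf, sizeof(buf), "%lu", (unsigned long)offset);
  int fd = open("/sys/kernel/bpf_testmod_uprobe", O_WRONLY);

  write(fd, buf, n);   /* non-zero offset installs the uprobe */
  /* ... run the traced function ... */
  write(fd, "0", 1);   /* writing 0 removes the uprobe again */
  close(fd);
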
Link: https://lore.kernel.org/all/20240611112158.40795-7-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Reviewed-by: Masami Hiramatsu (Google) Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 123 +++++++++++++++++- .../selftests/bpf/prog_tests/uprobe_syscall.c | 67 ++++++++++ 2 files changed, 189 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 2a18bd320e92..b0132a342bb5 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "bpf_testmod.h" #include "bpf_testmod_kfunc.h" @@ -358,6 +359,119 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; +/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only, + * please see test_uretprobe_regs_change test + */ +#ifdef __x86_64__ + +static int +uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func, + struct pt_regs *regs) + +{ + regs->ax = 0x12345678deadbeef; + regs->cx = 0x87654321feebdaed; + regs->r11 = (u64) -1; + return true; +} + +struct testmod_uprobe { + struct path path; + loff_t offset; + struct uprobe_consumer consumer; +}; + +static DEFINE_MUTEX(testmod_uprobe_mutex); + +static struct testmod_uprobe uprobe = { + .consumer.ret_handler = uprobe_ret_handler, +}; + +static int testmod_register_uprobe(loff_t offset) +{ + int err = -EBUSY; + + if (uprobe.offset) + return -EBUSY; + + mutex_lock(&testmod_uprobe_mutex); + + if (uprobe.offset) + goto out; + + err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path); + if (err) + goto out; + + err = uprobe_register_refctr(d_real_inode(uprobe.path.dentry), + offset, 0, &uprobe.consumer); + if (err) + path_put(&uprobe.path); + else + uprobe.offset = offset; + +out: + mutex_unlock(&testmod_uprobe_mutex); + return err; +} + +static void testmod_unregister_uprobe(void) +{ + mutex_lock(&testmod_uprobe_mutex); + + if (uprobe.offset) { + uprobe_unregister(d_real_inode(uprobe.path.dentry), + uprobe.offset, &uprobe.consumer); + uprobe.offset = 0; + } + + mutex_unlock(&testmod_uprobe_mutex); +} + +static ssize_t +bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t len) +{ + unsigned long offset = 0; + int err = 0; + + if (kstrtoul(buf, 0, &offset)) + return -EINVAL; + + if (offset) + err = testmod_register_uprobe(offset); + else + testmod_unregister_uprobe(); + + return err ?: strlen(buf); +} + +static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = { + .attr = { .name = "bpf_testmod_uprobe", .mode = 0666, }, + .write = bpf_testmod_uprobe_write, +}; + +static int register_bpf_testmod_uprobe(void) +{ + return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); +} + +static void unregister_bpf_testmod_uprobe(void) +{ + testmod_unregister_uprobe(); + sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file); +} + +#else +static int register_bpf_testmod_uprobe(void) +{ + return 0; +} + +static void unregister_bpf_testmod_uprobe(void) { } +#endif + BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) @@ -912,7 +1026,13 @@ static int bpf_testmod_init(void) return 
-EINVAL; sock = NULL; mutex_init(&sock_lock); - return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + if (ret < 0) + return ret; + ret = register_bpf_testmod_uprobe(); + if (ret < 0) + return ret; + return 0; } static void bpf_testmod_exit(void) @@ -927,6 +1047,7 @@ static void bpf_testmod_exit(void) bpf_kfunc_close_sock(); sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + unregister_bpf_testmod_uprobe(); } module_init(bpf_testmod_init); diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index 311ac19d8992..1a50cd35205d 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -149,15 +149,82 @@ static void test_uretprobe_regs_equal(void) cleanup: uprobe_syscall__destroy(skel); } + +#define BPF_TESTMOD_UPROBE_TEST_FILE "/sys/kernel/bpf_testmod_uprobe" + +static int write_bpf_testmod_uprobe(unsigned long offset) +{ + size_t n, ret; + char buf[30]; + int fd; + + n = sprintf(buf, "%lu", offset); + + fd = open(BPF_TESTMOD_UPROBE_TEST_FILE, O_WRONLY); + if (fd < 0) + return -errno; + + ret = write(fd, buf, n); + close(fd); + return ret != n ? (int) ret : 0; +} + +static void test_uretprobe_regs_change(void) +{ + struct pt_regs before = {}, after = {}; + unsigned long *pb = (unsigned long *) &before; + unsigned long *pa = (unsigned long *) &after; + unsigned long cnt = sizeof(before)/sizeof(*pb); + unsigned int i, err, offset; + + offset = get_uprobe_offset(uretprobe_regs_trigger); + + err = write_bpf_testmod_uprobe(offset); + if (!ASSERT_OK(err, "register_uprobe")) + return; + + uretprobe_regs(&before, &after); + + err = write_bpf_testmod_uprobe(0); + if (!ASSERT_OK(err, "unregister_uprobe")) + return; + + for (i = 0; i < cnt; i++) { + unsigned int offset = i * sizeof(unsigned long); + + switch (offset) { + case offsetof(struct pt_regs, rax): + ASSERT_EQ(pa[i], 0x12345678deadbeef, "rax"); + break; + case offsetof(struct pt_regs, rcx): + ASSERT_EQ(pa[i], 0x87654321feebdaed, "rcx"); + break; + case offsetof(struct pt_regs, r11): + ASSERT_EQ(pa[i], (__u64) -1, "r11"); + break; + default: + if (!ASSERT_EQ(pa[i], pb[i], "register before-after value check")) + fprintf(stdout, "failed register offset %u\n", offset); + } + } +} + #else static void test_uretprobe_regs_equal(void) { test__skip(); } + +static void test_uretprobe_regs_change(void) +{ + test__skip(); +} #endif void test_uprobe_syscall(void) { if (test__start_subtest("uretprobe_regs_equal")) test_uretprobe_regs_equal(); + if (test__start_subtest("uretprobe_regs_change")) + test_uretprobe_regs_change(); } From 9e7f74e64ae58688a33a6445e4f9a4e291d0824f Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:28 +0900 Subject: [PATCH 09/19] selftests/bpf: Add uretprobe syscall call from user space test Adding test to verify that when called from outside of the trampoline provided by kernel, the uretprobe syscall will cause calling process to receive SIGILL signal and the attached bpf program is not executed. 
Link: https://lore.kernel.org/all/20240611112158.40795-8-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Reviewed-by: Masami Hiramatsu (Google) Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- .../selftests/bpf/prog_tests/uprobe_syscall.c | 95 +++++++++++++++++++ .../bpf/progs/uprobe_syscall_executed.c | 17 ++++ 2 files changed, 112 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index 1a50cd35205d..11ccd693ef73 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -7,7 +7,10 @@ #include #include #include +#include +#include #include "uprobe_syscall.skel.h" +#include "uprobe_syscall_executed.skel.h" __naked unsigned long uretprobe_regs_trigger(void) { @@ -209,6 +212,91 @@ static void test_uretprobe_regs_change(void) } } +#ifndef __NR_uretprobe +#define __NR_uretprobe 463 +#endif + +__naked unsigned long uretprobe_syscall_call_1(void) +{ + /* + * Pretend we are uretprobe trampoline to trigger the return + * probe invocation in order to verify we get SIGILL. + */ + asm volatile ( + "pushq %rax\n" + "pushq %rcx\n" + "pushq %r11\n" + "movq $" __stringify(__NR_uretprobe) ", %rax\n" + "syscall\n" + "popq %r11\n" + "popq %rcx\n" + "retq\n" + ); +} + +__naked unsigned long uretprobe_syscall_call(void) +{ + asm volatile ( + "call uretprobe_syscall_call_1\n" + "retq\n" + ); +} + +static void test_uretprobe_syscall_call(void) +{ + LIBBPF_OPTS(bpf_uprobe_multi_opts, opts, + .retprobe = true, + ); + struct uprobe_syscall_executed *skel; + int pid, status, err, go[2], c; + + if (ASSERT_OK(pipe(go), "pipe")) + return; + + skel = uprobe_syscall_executed__open_and_load(); + if (!ASSERT_OK_PTR(skel, "uprobe_syscall_executed__open_and_load")) + goto cleanup; + + pid = fork(); + if (!ASSERT_GE(pid, 0, "fork")) + goto cleanup; + + /* child */ + if (pid == 0) { + close(go[1]); + + /* wait for parent's kick */ + err = read(go[0], &c, 1); + if (err != 1) + exit(-1); + + uretprobe_syscall_call(); + _exit(0); + } + + skel->links.test = bpf_program__attach_uprobe_multi(skel->progs.test, pid, + "/proc/self/exe", + "uretprobe_syscall_call", &opts); + if (!ASSERT_OK_PTR(skel->links.test, "bpf_program__attach_uprobe_multi")) + goto cleanup; + + /* kick the child */ + write(go[1], &c, 1); + err = waitpid(pid, &status, 0); + ASSERT_EQ(err, pid, "waitpid"); + + /* verify the child got killed with SIGILL */ + ASSERT_EQ(WIFSIGNALED(status), 1, "WIFSIGNALED"); + ASSERT_EQ(WTERMSIG(status), SIGILL, "WTERMSIG"); + + /* verify the uretprobe program wasn't called */ + ASSERT_EQ(skel->bss->executed, 0, "executed"); + +cleanup: + uprobe_syscall_executed__destroy(skel); + close(go[1]); + close(go[0]); +} #else static void test_uretprobe_regs_equal(void) { @@ -219,6 +307,11 @@ static void test_uretprobe_regs_change(void) { test__skip(); } + +static void test_uretprobe_syscall_call(void) +{ + test__skip(); +} #endif void test_uprobe_syscall(void) @@ -227,4 +320,6 @@ void test_uprobe_syscall(void) test_uretprobe_regs_equal(); if (test__start_subtest("uretprobe_regs_change")) test_uretprobe_regs_change(); + if (test__start_subtest("uretprobe_syscall_call")) + test_uretprobe_syscall_call(); } diff --git a/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c new file mode 100644 index 
000000000000..0d7f1a7db2e2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uprobe_syscall_executed.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include +#include + +struct pt_regs regs; + +char _license[] SEC("license") = "GPL"; + +int executed = 0; + +SEC("uretprobe.multi") +int test(struct pt_regs *regs) +{ + executed = 1; + return 0; +} From 30addd1dc6cc4558ec7024448f04bc77d508fe7d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 12 Jun 2024 08:44:29 +0900 Subject: [PATCH 10/19] selftests/bpf: Add uretprobe shadow stack test Adding uretprobe shadow stack test that runs all existing uretprobe tests with shadow stack enabled if it's available. Link: https://lore.kernel.org/all/20240611112158.40795-9-jolsa@kernel.org/ Acked-by: Andrii Nakryiko Reviewed-by: Masami Hiramatsu (Google) Signed-off-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- .../selftests/bpf/prog_tests/uprobe_syscall.c | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index 11ccd693ef73..c8517c8f5313 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -9,6 +9,9 @@ #include #include #include +#include +#include +#include #include "uprobe_syscall.skel.h" #include "uprobe_syscall_executed.skel.h" @@ -297,6 +300,56 @@ cleanup: close(go[1]); close(go[0]); } + +/* + * Borrowed from tools/testing/selftests/x86/test_shadow_stack.c. + * + * For use in inline enablement of shadow stack. + * + * The program can't return from the point where shadow stack gets enabled + * because there will be no address on the shadow stack. So it can't use + * syscall() for enablement, since it is a function. + * + * Based on code from nolibc.h. Keep a copy here because this can't pull + * in all of nolibc.h. + */ +#define ARCH_PRCTL(arg1, arg2) \ +({ \ + long _ret; \ + register long _num asm("eax") = __NR_arch_prctl; \ + register long _arg1 asm("rdi") = (long)(arg1); \ + register long _arg2 asm("rsi") = (long)(arg2); \ + \ + asm volatile ( \ + "syscall\n" \ + : "=a"(_ret) \ + : "r"(_arg1), "r"(_arg2), \ + "0"(_num) \ + : "rcx", "r11", "memory", "cc" \ + ); \ + _ret; \ +}) + +#ifndef ARCH_SHSTK_ENABLE +#define ARCH_SHSTK_ENABLE 0x5001 +#define ARCH_SHSTK_DISABLE 0x5002 +#define ARCH_SHSTK_SHSTK (1ULL << 0) +#endif + +static void test_uretprobe_shadow_stack(void) +{ + if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) { + test__skip(); + return; + } + + /* Run all of the uretprobe tests. 
*/ + test_uretprobe_regs_equal(); + test_uretprobe_regs_change(); + test_uretprobe_syscall_call(); + + ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK); +} #else static void test_uretprobe_regs_equal(void) { @@ -312,6 +365,11 @@ static void test_uretprobe_syscall_call(void) { test__skip(); } + +static void test_uretprobe_shadow_stack(void) +{ + test__skip(); +} #endif void test_uprobe_syscall(void) @@ -322,4 +380,6 @@ void test_uprobe_syscall(void) test_uretprobe_regs_change(); if (test__start_subtest("uretprobe_syscall_call")) test_uretprobe_syscall_call(); + if (test__start_subtest("uretprobe_shadow_stack")) + test_uretprobe_shadow_stack(); } From 41051daa38a778dd6da49f854442260ebc029894 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 11 Jun 2024 22:30:46 +0900 Subject: [PATCH 11/19] tracing/kprobe: Integrate test warnings into WARN_ONCE Cleanup the redundant WARN_ON_ONCE(cond) + pr_warn(msg) into WARN_ONCE(cond, msg). Also add some WARN_ONCE() for hitcount check. These WARN_ONCE() errors makes it easy to handle errors from ktest. Link: https://lore.kernel.org/all/171811264685.85078.8068819097047430463.stgit@devnote2/ Suggested-by: Steven Rostedt Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) --- kernel/trace/trace_kprobe.c | 54 +++++++++++++------------------------ 1 file changed, 19 insertions(+), 35 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 16383247bdbf..8c5816c04bd2 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -2023,19 +2023,16 @@ static __init int kprobe_trace_self_tests_init(void) pr_info("Testing kprobe tracing: "); ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)"); - if (WARN_ON_ONCE(ret)) { - pr_warn("error on probing function entry.\n"); + if (WARN_ONCE(ret, "error on probing function entry.")) { warn++; } else { /* Enable trace point */ tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); - if (WARN_ON_ONCE(tk == NULL)) { - pr_warn("error on getting new probe.\n"); + if (WARN_ONCE(tk == NULL, "error on probing function entry.")) { warn++; } else { file = find_trace_probe_file(tk, top_trace_array()); - if (WARN_ON_ONCE(file == NULL)) { - pr_warn("error on getting probe file.\n"); + if (WARN_ONCE(file == NULL, "error on getting probe file.")) { warn++; } else enable_trace_kprobe( @@ -2044,19 +2041,16 @@ static __init int kprobe_trace_self_tests_init(void) } ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval"); - if (WARN_ON_ONCE(ret)) { - pr_warn("error on probing function return.\n"); + if (WARN_ONCE(ret, "error on probing function return.")) { warn++; } else { /* Enable trace point */ tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); - if (WARN_ON_ONCE(tk == NULL)) { - pr_warn("error on getting 2nd new probe.\n"); + if (WARN_ONCE(tk == NULL, "error on getting 2nd new probe.")) { warn++; } else { file = find_trace_probe_file(tk, top_trace_array()); - if (WARN_ON_ONCE(file == NULL)) { - pr_warn("error on getting probe file.\n"); + if (WARN_ONCE(file == NULL, "error on getting probe file.")) { warn++; } else enable_trace_kprobe( @@ -2079,18 +2073,15 @@ static __init int kprobe_trace_self_tests_init(void) /* Disable trace points before removing it */ tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); - if (WARN_ON_ONCE(tk == NULL)) { - pr_warn("error on getting test probe.\n"); + if (WARN_ONCE(tk == NULL, "error on getting test probe.")) 
{ warn++; } else { - if (trace_kprobe_nhit(tk) != 1) { - pr_warn("incorrect number of testprobe hits\n"); + if (WARN_ONCE(trace_kprobe_nhit(tk) != 1, + "incorrect number of testprobe hits.")) warn++; - } file = find_trace_probe_file(tk, top_trace_array()); - if (WARN_ON_ONCE(file == NULL)) { - pr_warn("error on getting probe file.\n"); + if (WARN_ONCE(file == NULL, "error on getting probe file.")) { warn++; } else disable_trace_kprobe( @@ -2098,18 +2089,15 @@ static __init int kprobe_trace_self_tests_init(void) } tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); - if (WARN_ON_ONCE(tk == NULL)) { - pr_warn("error on getting 2nd test probe.\n"); + if (WARN_ONCE(tk == NULL, "error on getting 2nd test probe.")) { warn++; } else { - if (trace_kprobe_nhit(tk) != 1) { - pr_warn("incorrect number of testprobe2 hits\n"); + if (WARN_ONCE(trace_kprobe_nhit(tk) != 1, + "incorrect number of testprobe2 hits.")) warn++; - } file = find_trace_probe_file(tk, top_trace_array()); - if (WARN_ON_ONCE(file == NULL)) { - pr_warn("error on getting probe file.\n"); + if (WARN_ONCE(file == NULL, "error on getting probe file.")) { warn++; } else disable_trace_kprobe( @@ -2117,23 +2105,19 @@ static __init int kprobe_trace_self_tests_init(void) } ret = create_or_delete_trace_kprobe("-:testprobe"); - if (WARN_ON_ONCE(ret)) { - pr_warn("error on deleting a probe.\n"); + if (WARN_ONCE(ret, "error on deleting a probe.")) warn++; - } ret = create_or_delete_trace_kprobe("-:testprobe2"); - if (WARN_ON_ONCE(ret)) { - pr_warn("error on deleting a probe.\n"); + if (WARN_ONCE(ret, "error on deleting a probe.")) warn++; - } + end: ret = dyn_events_release_all(&trace_kprobe_ops); - if (WARN_ON_ONCE(ret)) { - pr_warn("error on cleaning up probes.\n"); + if (WARN_ONCE(ret, "error on cleaning up probes.")) warn++; - } + /* * Wait for the optimizer work to finish. Otherwise it might fiddle * with probes in already freed __init text. From 3eddb031965ae9a95ba098ae6eb81b082e024c65 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 11 Jun 2024 22:30:56 +0900 Subject: [PATCH 12/19] tracing/kprobe: Remove cleanup code unrelated to selftest This cleanup all kprobe events code is not related to the selftest itself, and it can fail by the reason unrelated to this test. If the test is successful, the generated events are cleaned up. And if not, we cannot guarantee that the kprobe events will work correctly. So, anyway, there is no need to clean it up. Link: https://lore.kernel.org/all/171811265627.85078.16897867213512435822.stgit@devnote2/ Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) --- kernel/trace/trace_kprobe.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8c5816c04bd2..7fd0f8576e4c 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -2114,10 +2114,6 @@ static __init int kprobe_trace_self_tests_init(void) end: - ret = dyn_events_release_all(&trace_kprobe_ops); - if (WARN_ONCE(ret, "error on cleaning up probes.")) - warn++; - /* * Wait for the optimizer work to finish. Otherwise it might fiddle * with probes in already freed __init text. 
From 4a365eb8a6d9940e838739935f1ce21f1ec8e33f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 May 2024 18:38:43 -0700 Subject: [PATCH 13/19] perf,uprobes: fix user stack traces in the presence of pending uretprobes When kernel has pending uretprobes installed, it hijacks original user function return address on the stack with a uretprobe trampoline address. There could be multiple such pending uretprobes (either on different user functions or on the same recursive one) at any given time within the same task. This approach interferes with the user stack trace capture logic, which would report suprising addresses (like 0x7fffffffe000) that correspond to a special "[uprobes]" section that kernel installs in the target process address space for uretprobe trampoline code, while logically it should be an address somewhere within the calling function of another traced user function. This is easy to correct for, though. Uprobes subsystem keeps track of pending uretprobes and records original return addresses. This patch is using this to do a post-processing step and restore each trampoline address entries with correct original return address. This is done only if there are pending uretprobes for current task. This is a similar approach to what fprobe/kretprobe infrastructure is doing when capturing kernel stack traces in the presence of pending return probes. Link: https://lore.kernel.org/all/20240522013845.1631305-3-andrii@kernel.org/ Reported-by: Riham Selim Signed-off-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- kernel/events/callchain.c | 43 ++++++++++++++++++++++++++++++++++++++- kernel/events/uprobes.c | 9 ++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 1273be84392c..b17e3323f7f6 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "internal.h" @@ -176,13 +177,51 @@ put_callchain_entry(int rctx) put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); } +static void fixup_uretprobe_trampoline_entries(struct perf_callchain_entry *entry, + int start_entry_idx) +{ +#ifdef CONFIG_UPROBES + struct uprobe_task *utask = current->utask; + struct return_instance *ri; + __u64 *cur_ip, *last_ip, tramp_addr; + + if (likely(!utask || !utask->return_instances)) + return; + + cur_ip = &entry->ip[start_entry_idx]; + last_ip = &entry->ip[entry->nr - 1]; + ri = utask->return_instances; + tramp_addr = uprobe_get_trampoline_vaddr(); + + /* + * If there are pending uretprobes for the current thread, they are + * recorded in a list inside utask->return_instances; each such + * pending uretprobe replaces traced user function's return address on + * the stack, so when stack trace is captured, instead of seeing + * actual function's return address, we'll have one or many uretprobe + * trampoline addresses in the stack trace, which are not helpful and + * misleading to users. + * So here we go over the pending list of uretprobes, and each + * encountered trampoline address is replaced with actual return + * address. 
+ */ + while (ri && cur_ip <= last_ip) { + if (*cur_ip == tramp_addr) { + *cur_ip = ri->orig_ret_vaddr; + ri = ri->next; + } + cur_ip++; + } +#endif +} + struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark) { struct perf_callchain_entry *entry; struct perf_callchain_entry_ctx ctx; - int rctx; + int rctx, start_entry_idx; entry = get_callchain_entry(&rctx); if (!entry) @@ -215,7 +254,9 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, if (add_mark) perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); + start_entry_idx = entry->nr; perf_callchain_user(&ctx, regs); + fixup_uretprobe_trampoline_entries(entry, start_entry_idx); } } diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2816e65729ac..99be2adedbc0 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -2159,6 +2159,15 @@ void uprobe_handle_trampoline(struct pt_regs *regs) instruction_pointer_set(regs, ri->orig_ret_vaddr); do { + /* pop current instance from the stack of pending return instances, + * as it's not pending anymore: we just fixed up original + * instruction pointer in regs and are about to call handlers; + * this allows fixup_uretprobe_trampoline_entries() to properly fix up + * captured stack traces from uretprobe handlers, in which pending + * trampoline addresses on the stack are replaced with correct + * original return addresses + */ + utask->return_instances = ri->next; if (valid) handle_uretprobe_chain(ri, regs); ri = free_ret_instance(ri); From 637c26f9b02d9c72448fcd5c9c4e3b08015404fc Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 May 2024 18:38:45 -0700 Subject: [PATCH 14/19] selftests/bpf: add test validating uprobe/uretprobe stack traces Add a set of tests to validate that stack traces captured from or in the presence of active uprobes and uretprobes are valid and complete. For this we use BPF program that are installed either on entry or exit of user function, plus deep-nested USDT. One of target funtions (target_1) is recursive to generate two different entries in the stack trace for the same uprobe/uretprobe, testing potential edge conditions. If there is no fixes, we get something like this for one of the scenarios: caller: 0x758fff - 0x7595ab target_1: 0x758fd5 - 0x758fff target_2: 0x758fca - 0x758fd5 target_3: 0x758fbf - 0x758fca target_4: 0x758fb3 - 0x758fbf ENTRY #0: 0x758fb3 (in target_4) ENTRY #1: 0x758fd3 (in target_2) ENTRY #2: 0x758ffd (in target_1) ENTRY #3: 0x7fffffffe000 ENTRY #4: 0x7fffffffe000 ENTRY #5: 0x6f8f39 ENTRY #6: 0x6fa6f0 ENTRY #7: 0x7f403f229590 Entry #3 and #4 (0x7fffffffe000) are uretprobe trampoline addresses which obscure actual target_1 and another target_1 invocations. Also note that between entry #0 and entry #1 we are missing an entry for target_3. With fixes, we get desired full stack traces: caller: 0x758fff - 0x7595ab target_1: 0x758fd5 - 0x758fff target_2: 0x758fca - 0x758fd5 target_3: 0x758fbf - 0x758fca target_4: 0x758fb3 - 0x758fbf ENTRY #0: 0x758fb7 (in target_4) ENTRY #1: 0x758fc8 (in target_3) ENTRY #2: 0x758fd3 (in target_2) ENTRY #3: 0x758ffd (in target_1) ENTRY #4: 0x758ff3 (in target_1) ENTRY #5: 0x75922c (in caller) ENTRY #6: 0x6f8f39 ENTRY #7: 0x6fa6f0 ENTRY #8: 0x7f986adc4cd0 Now there is a logical and complete sequence of function calls. 
Link: https://lore.kernel.org/all/20240522013845.1631305-5-andrii@kernel.org/ Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Signed-off-by: Masami Hiramatsu (Google) --- .../bpf/prog_tests/uretprobe_stack.c | 186 ++++++++++++++++++ .../selftests/bpf/progs/uretprobe_stack.c | 96 +++++++++ 2 files changed, 282 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c create mode 100644 tools/testing/selftests/bpf/progs/uretprobe_stack.c diff --git a/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c new file mode 100644 index 000000000000..6deb8d560ddd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uretprobe_stack.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include +#include "uretprobe_stack.skel.h" +#include "../sdt.h" + +/* We set up target_1() -> target_2() -> target_3() -> target_4() -> USDT() + * call chain, each being traced by our BPF program. On entry or return from + * each target_*() we are capturing user stack trace and recording it in + * global variable, so that user space part of the test can validate it. + * + * Note, we put each target function into a custom section to get those + * __start_XXX/__stop_XXX symbols, generated by linker for us, which allow us + * to know address range of those functions + */ +__attribute__((section("uprobe__target_4"))) +__weak int target_4(void) +{ + STAP_PROBE1(uretprobe_stack, target, 42); + return 42; +} + +extern const void *__start_uprobe__target_4; +extern const void *__stop_uprobe__target_4; + +__attribute__((section("uprobe__target_3"))) +__weak int target_3(void) +{ + return target_4(); +} + +extern const void *__start_uprobe__target_3; +extern const void *__stop_uprobe__target_3; + +__attribute__((section("uprobe__target_2"))) +__weak int target_2(void) +{ + return target_3(); +} + +extern const void *__start_uprobe__target_2; +extern const void *__stop_uprobe__target_2; + +__attribute__((section("uprobe__target_1"))) +__weak int target_1(int depth) +{ + if (depth < 1) + return 1 + target_1(depth + 1); + else + return target_2(); +} + +extern const void *__start_uprobe__target_1; +extern const void *__stop_uprobe__target_1; + +extern const void *__start_uretprobe_stack_sec; +extern const void *__stop_uretprobe_stack_sec; + +struct range { + long start; + long stop; +}; + +static struct range targets[] = { + {}, /* we want target_1 to map to target[1], so need 1-based indexing */ + { (long)&__start_uprobe__target_1, (long)&__stop_uprobe__target_1 }, + { (long)&__start_uprobe__target_2, (long)&__stop_uprobe__target_2 }, + { (long)&__start_uprobe__target_3, (long)&__stop_uprobe__target_3 }, + { (long)&__start_uprobe__target_4, (long)&__stop_uprobe__target_4 }, +}; + +static struct range caller = { + (long)&__start_uretprobe_stack_sec, + (long)&__stop_uretprobe_stack_sec, +}; + +static void validate_stack(__u64 *ips, int stack_len, int cnt, ...) 
+{ + int i, j; + va_list args; + + if (!ASSERT_GT(stack_len, 0, "stack_len")) + return; + + stack_len /= 8; + + /* check if we have enough entries to satisfy test expectations */ + if (!ASSERT_GE(stack_len, cnt, "stack_len2")) + return; + + if (env.verbosity >= VERBOSE_NORMAL) { + printf("caller: %#lx - %#lx\n", caller.start, caller.stop); + for (i = 1; i < ARRAY_SIZE(targets); i++) + printf("target_%d: %#lx - %#lx\n", i, targets[i].start, targets[i].stop); + for (i = 0; i < stack_len; i++) { + for (j = 1; j < ARRAY_SIZE(targets); j++) { + if (ips[i] >= targets[j].start && ips[i] < targets[j].stop) + break; + } + if (j < ARRAY_SIZE(targets)) { /* found target match */ + printf("ENTRY #%d: %#lx (in target_%d)\n", i, (long)ips[i], j); + } else if (ips[i] >= caller.start && ips[i] < caller.stop) { + printf("ENTRY #%d: %#lx (in caller)\n", i, (long)ips[i]); + } else { + printf("ENTRY #%d: %#lx\n", i, (long)ips[i]); + } + } + } + + va_start(args, cnt); + + for (i = cnt - 1; i >= 0; i--) { + /* most recent entry is the deepest target function */ + const struct range *t = va_arg(args, const struct range *); + + ASSERT_GE(ips[i], t->start, "addr_start"); + ASSERT_LT(ips[i], t->stop, "addr_stop"); + } + + va_end(args); +} + +/* __weak prevents inlining */ +__attribute__((section("uretprobe_stack_sec"))) +__weak void test_uretprobe_stack(void) +{ + LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + struct uretprobe_stack *skel; + int err; + + skel = uretprobe_stack__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + err = uretprobe_stack__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* trigger */ + ASSERT_EQ(target_1(0), 42 + 1, "trigger_return"); + + /* + * Stacks captured on ENTRY uprobes + */ + + /* (uprobe 1) target_1 in stack trace*/ + validate_stack(skel->bss->entry_stack1, skel->bss->entry1_len, + 2, &caller, &targets[1]); + /* (uprobe 1, recursed) */ + validate_stack(skel->bss->entry_stack1_recur, skel->bss->entry1_recur_len, + 3, &caller, &targets[1], &targets[1]); + /* (uprobe 2) caller -> target_1 -> target_1 -> target_2 */ + validate_stack(skel->bss->entry_stack2, skel->bss->entry2_len, + 4, &caller, &targets[1], &targets[1], &targets[2]); + /* (uprobe 3) */ + validate_stack(skel->bss->entry_stack3, skel->bss->entry3_len, + 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]); + /* (uprobe 4) caller -> target_1 -> target_1 -> target_2 -> target_3 -> target_4 */ + validate_stack(skel->bss->entry_stack4, skel->bss->entry4_len, + 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]); + + /* (USDT): full caller -> target_1 -> target_1 -> target_2 (uretprobed) + * -> target_3 -> target_4 (uretprobes) chain + */ + validate_stack(skel->bss->usdt_stack, skel->bss->usdt_len, + 6, &caller, &targets[1], &targets[1], &targets[2], &targets[3], &targets[4]); + + /* + * Now stacks captured on the way out in EXIT uprobes + */ + + /* (uretprobe 4) everything up to target_4, but excluding it */ + validate_stack(skel->bss->exit_stack4, skel->bss->exit4_len, + 5, &caller, &targets[1], &targets[1], &targets[2], &targets[3]); + /* we didn't install uretprobes on target_2 and target_3 */ + /* (uretprobe 1, recur) first target_1 call only */ + validate_stack(skel->bss->exit_stack1_recur, skel->bss->exit1_recur_len, + 2, &caller, &targets[1]); + /* (uretprobe 1) just a caller in the stack trace */ + validate_stack(skel->bss->exit_stack1, skel->bss->exit1_len, + 1, &caller); + +cleanup: + uretprobe_stack__destroy(skel); +} diff --git 
a/tools/testing/selftests/bpf/progs/uretprobe_stack.c b/tools/testing/selftests/bpf/progs/uretprobe_stack.c new file mode 100644 index 000000000000..9fdcf396b8f4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/uretprobe_stack.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +__u64 entry_stack1[32], exit_stack1[32]; +__u64 entry_stack1_recur[32], exit_stack1_recur[32]; +__u64 entry_stack2[32]; +__u64 entry_stack3[32]; +__u64 entry_stack4[32], exit_stack4[32]; +__u64 usdt_stack[32]; + +int entry1_len, exit1_len; +int entry1_recur_len, exit1_recur_len; +int entry2_len, exit2_len; +int entry3_len, exit3_len; +int entry4_len, exit4_len; +int usdt_len; + +#define SZ sizeof(usdt_stack) + +SEC("uprobe//proc/self/exe:target_1") +int BPF_UPROBE(uprobe_1) +{ + /* target_1 is recursive wit depth of 2, so we capture two separate + * stack traces, depending on which occurence it is + */ + static bool recur = false; + + if (!recur) + entry1_len = bpf_get_stack(ctx, &entry_stack1, SZ, BPF_F_USER_STACK); + else + entry1_recur_len = bpf_get_stack(ctx, &entry_stack1_recur, SZ, BPF_F_USER_STACK); + + recur = true; + return 0; +} + +SEC("uretprobe//proc/self/exe:target_1") +int BPF_URETPROBE(uretprobe_1) +{ + /* see above, target_1 is recursive */ + static bool recur = false; + + /* NOTE: order of returns is reversed to order of entries */ + if (!recur) + exit1_recur_len = bpf_get_stack(ctx, &exit_stack1_recur, SZ, BPF_F_USER_STACK); + else + exit1_len = bpf_get_stack(ctx, &exit_stack1, SZ, BPF_F_USER_STACK); + + recur = true; + return 0; +} + +SEC("uprobe//proc/self/exe:target_2") +int BPF_UPROBE(uprobe_2) +{ + entry2_len = bpf_get_stack(ctx, &entry_stack2, SZ, BPF_F_USER_STACK); + return 0; +} + +/* no uretprobe for target_2 */ + +SEC("uprobe//proc/self/exe:target_3") +int BPF_UPROBE(uprobe_3) +{ + entry3_len = bpf_get_stack(ctx, &entry_stack3, SZ, BPF_F_USER_STACK); + return 0; +} + +/* no uretprobe for target_3 */ + +SEC("uprobe//proc/self/exe:target_4") +int BPF_UPROBE(uprobe_4) +{ + entry4_len = bpf_get_stack(ctx, &entry_stack4, SZ, BPF_F_USER_STACK); + return 0; +} + +SEC("uretprobe//proc/self/exe:target_4") +int BPF_URETPROBE(uretprobe_4) +{ + exit4_len = bpf_get_stack(ctx, &exit_stack4, SZ, BPF_F_USER_STACK); + return 0; +} + +SEC("usdt//proc/self/exe:uretprobe_stack:target") +int BPF_USDT(usdt_probe) +{ + usdt_len = bpf_get_stack(ctx, &usdt_stack, SZ, BPF_F_USER_STACK); + return 0; +} From 9d8616034f161222a4ac166c1b42b6d79961c005 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 5 Jul 2024 16:11:25 +0900 Subject: [PATCH 15/19] tracing/kprobes: Add symbol counting check when module loads Currently, kprobe event checks whether the target symbol name is unique or not, so that it does not put a probe on an unexpected place. But this skips the check if the target is on a module because the module may not be loaded. To fix this issue, this patch checks the number of probe target symbols in a target module when the module is loaded. If the probe is not on the unique name symbols in the module, it will be rejected at that point. 
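As an illustration of the interface this check guards, a kprobe event on a module symbol is created by writing a "MOD:SYM" definition to tracefs. A sketch in C (hypothetical "my_mod"/"my_func" names; the tracefs mount point may differ on a given system):

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          /* "p:<group>/<event> <module>:<symbol>" appends a new kprobe event */
          const char *cmd = "p:mygroup/myevent my_mod:my_func\n";
          int fd = open("/sys/kernel/tracing/kprobe_events", O_WRONLY | O_APPEND);

          if (fd < 0) {
                  perror("open kprobe_events");
                  return 1;
          }
          if (write(fd, cmd, strlen(cmd)) < 0)
                  /* with this patch: EADDRNOTAVAIL if my_func is not unique within the
                   * loaded my_mod, ENOENT if it does not exist there; an event on a
                   * not-yet-loaded module is still accepted and checked later, when
                   * the module loads
                   */
                  perror("add kprobe event");
          close(fd);
          return 0;
  }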
Note that a symbol which has a unique name in the target module will be accepted even if there are same-name symbols in the kernel or other modules. Link: https://lore.kernel.org/all/172016348553.99543.2834679315611882137.stgit@devnote2/ Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) --- kernel/trace/trace_kprobe.c | 125 +++++++++++++++++++++++------------- 1 file changed, 81 insertions(+), 44 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7fd0f8576e4c..4cee3442bcce 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -678,6 +678,21 @@ end: } #ifdef CONFIG_MODULES +static int validate_module_probe_symbol(const char *modname, const char *symbol); + +static int register_module_trace_kprobe(struct module *mod, struct trace_kprobe *tk) +{ + const char *p; + int ret = 0; + + p = strchr(trace_kprobe_symbol(tk), ':'); + if (p) + ret = validate_module_probe_symbol(module_name(mod), p + 1); + if (!ret) + ret = __register_trace_kprobe(tk); + return ret; +} + /* Module notifier call back, checking event on the module */ static int trace_kprobe_module_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -696,7 +711,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, if (trace_kprobe_within_module(tk, mod)) { /* Don't need to check busy - this should have gone. */ __unregister_trace_kprobe(tk); - ret = __register_trace_kprobe(tk); + ret = register_module_trace_kprobe(mod, tk); if (ret) pr_warn("Failed to re-register probe %s on %s: %d\n", trace_probe_name(&tk->tp), @@ -747,17 +762,68 @@ static int count_mod_symbols(void *data, const char *name, unsigned long unused) return 0; } -static unsigned int number_of_same_symbols(char *func_name) +static unsigned int number_of_same_symbols(const char *mod, const char *func_name) { struct sym_count_ctx ctx = { .count = 0, .name = func_name }; - kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); + if (!mod) + kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count); - module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx); + module_kallsyms_on_each_symbol(mod, count_mod_symbols, &ctx); return ctx.count; } +static int validate_module_probe_symbol(const char *modname, const char *symbol) +{ + unsigned int count = number_of_same_symbols(modname, symbol); + + if (count > 1) { + /* + * Users should use ADDR to remove the ambiguity of + * using KSYM only. + */ + return -EADDRNOTAVAIL; + } else if (count == 0) { + /* + * We can return ENOENT earlier than when register the + * kprobe. + */ + return -ENOENT; + } + return 0; +} + +static int validate_probe_symbol(char *symbol) +{ + struct module *mod = NULL; + char *modname = NULL, *p; + int ret = 0; + + p = strchr(symbol, ':'); + if (p) { + modname = symbol; + symbol = p + 1; + *p = '\0'; + /* Return 0 (defer) if the module does not exist yet.
*/ + rcu_read_lock_sched(); + mod = find_module(modname); + if (mod && !try_module_get(mod)) + mod = NULL; + rcu_read_unlock_sched(); + if (!mod) + goto out; + } + + ret = validate_module_probe_symbol(modname, symbol); +out: + if (p) + *p = ':'; + if (mod) + module_put(mod); + return ret; +} + static int trace_kprobe_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs); @@ -881,6 +947,14 @@ static int __trace_kprobe_create(int argc, const char *argv[]) trace_probe_log_err(0, BAD_PROBE_ADDR); goto parse_error; } + ret = validate_probe_symbol(symbol); + if (ret) { + if (ret == -EADDRNOTAVAIL) + trace_probe_log_err(0, NON_UNIQ_SYMBOL); + else + trace_probe_log_err(0, BAD_PROBE_ADDR); + goto parse_error; + } if (is_return) ctx.flags |= TPARG_FL_RETURN; ret = kprobe_on_func_entry(NULL, symbol, offset); @@ -893,31 +967,6 @@ static int __trace_kprobe_create(int argc, const char *argv[]) } } - if (symbol && !strchr(symbol, ':')) { - unsigned int count; - - count = number_of_same_symbols(symbol); - if (count > 1) { - /* - * Users should use ADDR to remove the ambiguity of - * using KSYM only. - */ - trace_probe_log_err(0, NON_UNIQ_SYMBOL); - ret = -EADDRNOTAVAIL; - - goto error; - } else if (count == 0) { - /* - * We can return ENOENT earlier than when register the - * kprobe. - */ - trace_probe_log_err(0, BAD_PROBE_ADDR); - ret = -ENOENT; - - goto error; - } - } - trace_probe_log_set_index(0); if (event) { ret = traceprobe_parse_event_name(&event, &group, gbuf, @@ -1835,21 +1884,9 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, char *event; if (func) { - unsigned int count; - - count = number_of_same_symbols(func); - if (count > 1) - /* - * Users should use addr to remove the ambiguity of - * using func only. - */ - return ERR_PTR(-EADDRNOTAVAIL); - else if (count == 0) - /* - * We can return ENOENT earlier than when register the - * kprobe. - */ - return ERR_PTR(-ENOENT); + ret = validate_probe_symbol(func); + if (ret) + return ERR_PTR(ret); } /* From b10545b6b86b7a0b3e26b4c2a5c99b72d49bc4de Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Wed, 10 Jul 2024 08:36:31 +0900 Subject: [PATCH 16/19] tracing/kprobes: Fix build error when find_module() is not available The kernel test robot reported that the find_module() is not available if CONFIG_MODULES=n. Fix this error by hiding find_modules() in #ifdef CONFIG_MODULES with related rcu locks as try_module_get_by_name(). Link: https://lore.kernel.org/all/172056819167.201571.250053007194508038.stgit@devnote2/ Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202407070744.RcLkn8sq-lkp@intel.com/ Closes: https://lore.kernel.org/oe-kbuild-all/202407070917.VVUCBlaS-lkp@intel.com/ Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/trace_kprobe.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4cee3442bcce..61a6da808203 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -794,6 +794,24 @@ static int validate_module_probe_symbol(const char *modname, const char *symbol) return 0; } +#ifdef CONFIG_MODULES +/* Return NULL if the module is not loaded or under unloading. 
*/ +static struct module *try_module_get_by_name(const char *name) +{ + struct module *mod; + + rcu_read_lock_sched(); + mod = find_module(name); + if (mod && !try_module_get(mod)) + mod = NULL; + rcu_read_unlock_sched(); + + return mod; +} +#else +#define try_module_get_by_name(name) (NULL) +#endif + static int validate_probe_symbol(char *symbol) { struct module *mod = NULL; @@ -805,12 +823,7 @@ static int validate_probe_symbol(char *symbol) modname = symbol; symbol = p + 1; *p = '\0'; - /* Return 0 (defer) if the module does not exist yet. */ - rcu_read_lock_sched(); - mod = find_module(modname); - if (mod && !try_module_get(mod)) - mod = NULL; - rcu_read_unlock_sched(); + mod = try_module_get_by_name(modname); if (!mod) goto out; } From 63ded110979bdd8741542ec66fb9e2d2074aed8c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 12 Jul 2024 15:52:27 +0200 Subject: [PATCH 17/19] uprobe: Change uretprobe syscall scope and number After discussing with Arnd [1], it's preferable to change the uretprobe syscall number to 467 to avoid a merge conflict with the xattrat syscalls. Also change the ABI to 'common', which will ease the global scripts/syscall.tbl management. One consequence is that we generate uretprobe syscall numbers for ABIs that do not support the uretprobe syscall; the syscall simply returns -ENOSYS when called on such an ABI. [1] https://lore.kernel.org/lkml/784a34e5-4654-44c9-9c07-f9f4ffd952a0@app.fastmail.com/ Link: https://lore.kernel.org/all/20240712135228.1619332-2-jolsa@kernel.org/ Fixes: 190fec72df4a ("uprobe: Wire up uretprobe system call") Suggested-by: Arnd Bergmann Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- arch/x86/entry/syscalls/syscall_64.tbl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 6452c2ec469a..dabf1982de6d 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -384,7 +384,7 @@ 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules 462 common mseal sys_mseal -463 64 uretprobe sys_uretprobe +467 common uretprobe sys_uretprobe # # Due to a historical design error, certain syscalls are numbered differently From 3e301b431b91e4b973dbc520e90e220acb5b91f5 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 12 Jul 2024 15:52:28 +0200 Subject: [PATCH 18/19] selftests/bpf: Change uretprobe syscall number in uprobe_syscall test Fix the syscall number value to match the renumbered uretprobe syscall.
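For context, the renumbering only matters to user space while uapi headers lag behind; callers (including the selftest patched below) carry a guarded fallback define. A minimal sketch of that pattern (illustrative only: per this series sys_uretprobe is meant to be entered from the kernel's uretprobe trampoline and a direct call from ordinary code is rejected, so the sketch does not invoke it):

  #include <stdio.h>
  #include <sys/syscall.h>

  /* fall back to the renumbered value until the installed headers catch up */
  #ifndef __NR_uretprobe
  #define __NR_uretprobe 467      /* was 463 before this change */
  #endif

  int main(void)
  {
          printf("__NR_uretprobe = %d\n", __NR_uretprobe);
          return 0;
  }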
Link: https://lore.kernel.org/all/20240712135228.1619332-3-jolsa@kernel.org/ Fixes: 9e7f74e64ae5 ("selftests/bpf: Add uretprobe syscall call from user space test") Signed-off-by: Jiri Olsa Acked-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c index c8517c8f5313..bd8c75b620c2 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_syscall.c @@ -216,7 +216,7 @@ static void test_uretprobe_regs_change(void) } #ifndef __NR_uretprobe -#define __NR_uretprobe 463 +#define __NR_uretprobe 467 #endif __naked unsigned long uretprobe_syscall_call_1(void) From c26b1b89b8a9fd8665e79cd798bd970e233772b6 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 12 Jul 2024 09:26:17 +0900 Subject: [PATCH 19/19] MAINTAINERS: Add uprobes entry Add uprobes entry to MAINTAINERS to clarify the maintainers. Link: https://lore.kernel.org/all/172074397710.247544.17045299807723238107.stgit@devnote2/ Suggested-by: Peter Zijlstra Signed-off-by: Masami Hiramatsu (Google) Acked-by: Peter Zijlstra (Intel) Acked-by: Oleg Nesterov --- MAINTAINERS | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index aacccb376c28..120158f0716e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23109,6 +23109,19 @@ F: drivers/mtd/ubi/ F: include/linux/mtd/ubi.h F: include/uapi/mtd/ubi-user.h +UPROBES +M: Masami Hiramatsu +M: Oleg Nesterov +M: Peter Zijlstra +L: linux-kernel@vger.kernel.org +L: linux-trace-kernel@vger.kernel.org +S: Maintained +F: arch/*/include/asm/uprobes.h +F: arch/*/kernel/probes/uprobes.c +F: arch/*/kernel/uprobes.c +F: include/linux/uprobes.h +F: kernel/events/uprobes.c + USB "USBNET" DRIVER FRAMEWORK M: Oliver Neukum L: netdev@vger.kernel.org