Merge tag 'probes-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull kprobes updates from Masami Hiramatsu:

 - Skip negative return code check for snprintf in eprobe

 - Add recursive call test cases for kprobe unit test

 - Add 'char' type to probe events to show it as the character instead
   of value

 - Update kselftest kprobe-event testcase to ignore '__pfx_' symbols

 - Fix kselftest to check filter on eprobe event correctly

 - Add filter on eprobe to the README file in tracefs

 - Fix optprobes to check whether there is 'under unoptimizing' optprobe
   when optimizing another kprobe correctly

 - Fix optprobe to check whether there is 'under unoptimizing' optprobe
   when fetching the original instruction correctly

 - Fix optprobe to free 'forcibly unoptimized' optprobe correctly

* tag 'probes-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing/eprobe: no need to check for negative ret value for snprintf
  test_kprobes: Add recursed kprobe test case
  tracing/probe: add a char type to show the character value of traced arguments
  selftests/ftrace: Fix probepoint testcase to ignore __pfx_* symbols
  selftests/ftrace: Fix eprobe syntax test case to check filter support
  tracing/eprobe: Fix to add filter on eprobe description in README file
  x86/kprobes: Fix arch_check_optimized_kprobe check within optimized_kprobe range
  x86/kprobes: Fix __recover_optprobed_insn check optimizing logic
  kprobes: Fix to handle forcibly unoptimized kprobes on freeing_list
Linus Torvalds 2023-02-23 13:03:08 -08:00
commit 2b79eb73e2
12 changed files with 117 additions and 33 deletions


@@ -58,7 +58,7 @@ Synopsis of kprobe_events
NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
(u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types
(x8/x16/x32/x64), "string", "ustring", "symbol", "symstr"
(x8/x16/x32/x64), "char", "string", "ustring", "symbol", "symstr"
and bitfield are supported.
(\*1) only for the probe on function entry (offs == 0).
@@ -82,6 +82,8 @@ Note that the array can be applied to memory type fetchargs, you can not
apply it to registers/stack-entries etc. (for example, '$stack1:x8[8]' is
wrong, but '+8($stack):x8[8]' is OK.)
+Char type can be used to show the character value of traced arguments.
String type is a special type, which fetches a "null-terminated" string from
kernel space. This means it will fail and store NULL if the string container
has been paged out. "ustring" type is an alternative of string for user-space.
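
As a quick illustration of the new type (a hedged sketch only: it assumes an x86-64 kernel where %di carries the first argument, tracefs mounted at /sys/kernel/tracing, and tracefs_create_dir as an arbitrary probe point; the probe name 'myprobe' and the array length are made up), the char type combines with memory dereference fetchargs like any other type:

    cd /sys/kernel/tracing
    # show the first byte arg1 points to as a character, and the first 8 bytes as a char array
    echo 'p:myprobe tracefs_create_dir name=+0(%di):char head=+0(%di):char[8]' >> kprobe_events
    echo 1 > events/kprobes/myprobe/enable
    cat trace            # the argument appears as 't'-style characters instead of numeric values
    echo 0 > events/kprobes/myprobe/enable
    echo '-:myprobe' >> kprobe_events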


@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
/* This function only handles jump-optimized kprobe */
if (kp && kprobe_optimized(kp)) {
op = container_of(kp, struct optimized_kprobe, kp);
-/* If op->list is not empty, op is under optimizing */
-if (list_empty(&op->list))
+/* If op is optimized or under unoptimizing */
+if (list_empty(&op->list) || optprobe_queued_unopt(op))
goto found;
}
}
@@ -353,7 +353,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
for (i = 1; i < op->optinsn.size; i++) {
p = get_kprobe(op->kp.addr + i);
-if (p && !kprobe_disabled(p))
+if (p && !kprobe_disarmed(p))
return -EEXIST;
}


@@ -378,6 +378,8 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */


@@ -458,7 +458,7 @@ static inline int kprobe_optready(struct kprobe *p)
}
/* Return true if the kprobe is disarmed. Note: p must be on hash list */
-static inline bool kprobe_disarmed(struct kprobe *p)
+bool kprobe_disarmed(struct kprobe *p)
{
struct optimized_kprobe *op;
@@ -555,17 +555,15 @@ static void do_unoptimize_kprobes(void)
/* See comment in do_optimize_kprobes() */
lockdep_assert_cpus_held();
-/* Unoptimization must be done anytime */
-if (list_empty(&unoptimizing_list))
-return;
+if (!list_empty(&unoptimizing_list))
+arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
-arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
-/* Loop on 'freeing_list' for disarming */
+/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
list_for_each_entry_safe(op, tmp, &freeing_list, list) {
/* Switching from detour code to origin */
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-/* Disarm probes if marked disabled */
-if (kprobe_disabled(&op->kp))
+/* Disarm probes if marked disabled and not gone */
+if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
arch_disarm_kprobe(&op->kp);
if (kprobe_unused(&op->kp)) {
/*
@@ -662,7 +660,7 @@ void wait_for_kprobe_optimizer(void)
mutex_unlock(&kprobe_mutex);
}
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
struct optimized_kprobe *_op;
@@ -797,14 +795,13 @@ static void kill_optimized_kprobe(struct kprobe *p)
op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
if (kprobe_unused(p)) {
-/* Enqueue if it is unused */
-list_add(&op->list, &freeing_list);
/*
-* Remove unused probes from the hash list. After waiting
-* for synchronization, this probe is reclaimed.
-* (reclaiming is done by do_free_cleaned_kprobes().)
+* Unused kprobe is on unoptimizing or freeing list. We move it
+* to freeing_list and let the kprobe_optimizer() remove it from
+* the kprobe hash list and free it.
*/
-hlist_del_rcu(&op->kp.hlist);
+if (optprobe_queued_unopt(op))
+list_move(&op->list, &freeing_list);
}
/* Don't touch the code, because it is already freed. */


@@ -5646,7 +5646,7 @@ static const char readme_msg[] =
#ifdef CONFIG_HIST_TRIGGERS
"\t s:[synthetic/]<event> <field> [<field>]\n"
#endif
"\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
"\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
"\t -:[<group>/][<event>]\n"
#ifdef CONFIG_KPROBE_EVENTS
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
@@ -5663,7 +5663,7 @@ static const char readme_msg[] =
"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
"\t symstr, <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
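
The '[if <filter>]' string added to the README above is what the updated eprobe selftest checks for before exercising the filter syntax. As a hedged usage sketch (group, event and field names are illustrative; it assumes the attached event exposes a 'dfd' field, as syscalls/sys_enter_openat does), an event probe with a filter could look like:

    cd /sys/kernel/tracing
    # only record openat entries whose dfd argument is non-negative
    echo 'e:myeprobes/open syscalls/sys_enter_openat dfd=$dfd if dfd >= 0' >> dynamic_events
    echo 1 > events/myeprobes/open/enable
    cat trace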


@@ -923,17 +923,13 @@ static int trace_eprobe_parse_filter(struct trace_eprobe *ep, int argc, const ch
p = ep->filter_str;
for (i = 0; i < argc; i++) {
-ret = snprintf(p, len, "%s ", argv[i]);
-if (ret < 0)
-goto error;
-if (ret > len) {
-ret = -E2BIG;
-goto error;
-}
+if (i)
+ret = snprintf(p, len, " %s", argv[i]);
+else
+ret = snprintf(p, len, "%s", argv[i]);
p += ret;
len -= ret;
}
-p[-1] = '\0';
/*
* Ensure the filter string can be parsed correctly. Note, this


@@ -50,6 +50,7 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(x8, u8, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x16, u16, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x32, u32, "0x%x")
DEFINE_BASIC_PRINT_TYPE_FUNC(x64, u64, "0x%Lx")
+DEFINE_BASIC_PRINT_TYPE_FUNC(char, u8, "'%c'")
int PRINT_TYPE_FUNC_NAME(symbol)(struct trace_seq *s, void *data, void *ent)
{
@@ -95,6 +96,7 @@ static const struct fetch_type probe_fetch_types[] = {
ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
+ASSIGN_FETCH_TYPE_ALIAS(char, u8, u8, 0),
ASSIGN_FETCH_TYPE_ALIAS(symbol, ADDR_FETCH_TYPE, ADDR_FETCH_TYPE, 0),
ASSIGN_FETCH_TYPE_END


@@ -166,6 +166,7 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(x16);
DECLARE_BASIC_PRINT_TYPE_FUNC(x32);
DECLARE_BASIC_PRINT_TYPE_FUNC(x64);
+DECLARE_BASIC_PRINT_TYPE_FUNC(char);
DECLARE_BASIC_PRINT_TYPE_FUNC(string);
DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);


@@ -14,6 +14,7 @@
static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
+static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;
@@ -27,18 +28,27 @@ static noinline u32 kprobe_target(u32 value)
return (value / div_factor);
}
+static noinline u32 kprobe_recursed_target(u32 value)
+{
+return (value / div_factor);
+}
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
-preh_val = (rand1 / div_factor);
+preh_val = recursed_target(rand1);
return 0;
}
static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
+u32 expval = recursed_target(rand1);
KUNIT_EXPECT_FALSE(current_test, preemptible());
-KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor));
+KUNIT_EXPECT_EQ(current_test, preh_val, expval);
posth_val = preh_val + div_factor;
}
@@ -136,6 +146,29 @@ static void test_kprobes(struct kunit *test)
unregister_kprobes(kps, 2);
}
+static struct kprobe kp_missed = {
+.symbol_name = "kprobe_recursed_target",
+.pre_handler = kp_pre_handler,
+.post_handler = kp_post_handler,
+};
+static void test_kprobe_missed(struct kunit *test)
+{
+current_test = test;
+preh_val = 0;
+posth_val = 0;
+KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
+recursed_target(rand1);
+KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
+KUNIT_EXPECT_NE(test, 0, preh_val);
+KUNIT_EXPECT_NE(test, 0, posth_val);
+unregister_kprobe(&kp_missed);
+}
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
@@ -336,6 +369,7 @@ static int kprobes_test_init(struct kunit *test)
{
target = kprobe_target;
target2 = kprobe_target2;
+recursed_target = kprobe_recursed_target;
stacktrace_target = kprobe_stacktrace_target;
internal_target = kprobe_stacktrace_internal_target;
stacktrace_driver = kprobe_stacktrace_driver;
@@ -346,6 +380,7 @@ static int kprobes_test_init(struct kunit *test)
static struct kunit_case kprobes_testcases[] = {
KUNIT_CASE(test_kprobe),
KUNIT_CASE(test_kprobes),
+KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
KUNIT_CASE(test_kretprobe),
KUNIT_CASE(test_kretprobes),


@@ -22,6 +22,8 @@ check_error 'e:foo/^bar.1 syscalls/sys_enter_openat' # BAD_EVENT_NAME
check_error 'e:foo/bar syscalls/sys_enter_openat arg=^dfd' # BAD_FETCH_ARG
check_error 'e:foo/bar syscalls/sys_enter_openat ^arg=$foo' # BAD_ATTACH_ARG
-check_error 'e:foo/bar syscalls/sys_enter_openat if ^' # NO_EP_FILTER
+if grep -q '<attached-group>\.<attached-event>.*\[if <filter>\]' README; then
+check_error 'e:foo/bar syscalls/sys_enter_openat if ^' # NO_EP_FILTER
+fi
exit 0


@@ -0,0 +1,47 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Kprobe event char type argument
# requires: kprobe_events
case `uname -m` in
x86_64)
ARG1=%di
;;
i[3456]86)
ARG1=%ax
;;
aarch64)
ARG1=%x0
;;
arm*)
ARG1=%r0
;;
ppc64*)
ARG1=%r3
;;
ppc*)
ARG1=%r3
;;
s390*)
ARG1=%r2
;;
mips*)
ARG1=%r4
;;
*)
echo "Please implement other architecture here"
exit_untested
esac
: "Test get argument (1)"
echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):char" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1='t'" trace
echo 0 > events/kprobes/testprobe/enable
: "Test get argument (2)"
echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):char arg2=+0(${ARG1}):char[4]" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1='t' arg2={'t','e','s','t'}" trace


@@ -21,7 +21,7 @@ set_offs() { # prev target next
# We have to decode symbol addresses to get correct offsets.
# If the offset is not an instruction boundary, it cause -EILSEQ.
-set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
+set_offs `grep -v __pfx_ /proc/kallsyms | grep -A1 -B1 ${TARGET_FUNC} | cut -f 1 -d " " | xargs`
UINT_TEST=no
# printf "%x" -1 returns (unsigned long)-1.