2018-11-15 10:32:38 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
|
|
|
|
#define _LINUX_KERNEL_FTRACE_INTERNAL_H
|
|
|
|
|
2023-05-17 05:51:48 -07:00
|
|
|
int __register_ftrace_function(struct ftrace_ops *ops);
|
|
|
|
int __unregister_ftrace_function(struct ftrace_ops *ops);
|
|
|
|
|
2018-11-15 10:32:38 -07:00
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
|
|
|
|
|
|
|
extern struct mutex ftrace_lock;
|
|
|
|
extern struct ftrace_ops global_ops;
|
|
|
|
|
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
|
|
|
|
|
|
|
int ftrace_startup(struct ftrace_ops *ops, int command);
|
|
|
|
int ftrace_shutdown(struct ftrace_ops *ops, int command);
|
|
|
|
int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);
|
2024-06-05 13:26:48 -07:00
|
|
|
int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command);
|
/*
 * Subops: a single "manager" ftrace_ops may multiplex several subops
 * (e.g. multiple function_graph users) behind one callback registered
 * with ftrace.  The manager keeps the subops it controls on its
 * subop_list.
 *
 * ftrace_startup_subops() attaches a subops to a manager ops: if the
 * manager has no subops yet, the subops filters are copied into the
 * manager which is then registered via ftrace_startup(); otherwise the
 * new subops filters are merged with the manager's existing ones so the
 * manager covers all of its subops.
 *
 * ftrace_shutdown_subops() detaches a subops: the manager's filters are
 * rebuilt from the remaining entries on its subop_list, and when no
 * subops remain, ftrace_shutdown() is called to disable the manager
 * entirely.
 *
 * Note: it is up to the manager's callback to always invoke a subops
 * callback whenever that subops filter matches, as during updates the
 * manager may briefly trace more functions than are currently
 * registered.
 */
2024-06-03 12:07:14 -07:00
|
|
|
int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command);
|
2018-11-15 10:32:38 -07:00
|
|
|
|
|
|
|
#else /* !CONFIG_DYNAMIC_FTRACE */
|
|
|
|
|
|
|
|
/* Keep as macros so we do not need to define the commands */

/*
 * Without CONFIG_DYNAMIC_FTRACE there are no trampoline/record updates to
 * perform, so starting an ops reduces to registering its callback.  The
 * @command bitmask is accepted but unused.  On success the ops is flagged
 * FTRACE_OPS_FL_ENABLED; the statement expression evaluates to the return
 * value of __register_ftrace_function().
 */
# define ftrace_startup(ops, command)				\
	({							\
		int ___ret = __register_ftrace_function(ops);	\
		if (!___ret)					\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
		___ret;						\
	})
|
|
|
|
/*
 * Counterpart of ftrace_startup() for the !CONFIG_DYNAMIC_FTRACE case:
 * simply unregister the callback and, on success, clear the
 * FTRACE_OPS_FL_ENABLED flag.  @command is accepted but unused.
 * Evaluates to the return value of __unregister_ftrace_function().
 */
# define ftrace_shutdown(ops, command)				\
	({							\
		int ___ret = __unregister_ftrace_function(ops);	\
		if (!___ret)					\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;	\
		___ret;						\
	})
|
|
|
|
/*
 * Without CONFIG_DYNAMIC_FTRACE there is no per-ops function filtering,
 * so every ops is considered to match every @ip: always report a hit.
 * @ops and @regs are ignored.
 */
static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}
|
2024-06-05 13:26:48 -07:00
|
|
|
/*
 * The subops mechanism relies on dynamic ftrace filter manipulation;
 * without CONFIG_DYNAMIC_FTRACE it cannot be supported, so reject any
 * attempt to attach a subops.
 */
static inline int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
	return -EINVAL;
}
|
|
|
|
/*
 * Stub for !CONFIG_DYNAMIC_FTRACE: no subops can ever have been started
 * (ftrace_startup_subops() fails likewise), so detaching one is always
 * an error.
 */
static inline int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
{
	return -EINVAL;
}
|
2018-11-15 10:32:38 -07:00
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
extern int ftrace_graph_active;
|
2024-06-07 06:48:33 -07:00
|
|
|
# ifdef CONFIG_DYNAMIC_FTRACE
|
2024-06-03 12:07:17 -07:00
|
|
|
extern void fgraph_update_pid_func(void);
|
2024-06-07 06:48:33 -07:00
|
|
|
# else
|
|
|
|
/* No-op: without CONFIG_DYNAMIC_FTRACE there are no pid-filter updates to
 * propagate to the function graph tracer. */
static inline void fgraph_update_pid_func(void) {}
|
|
|
|
# endif
|
2018-11-15 10:32:38 -07:00
|
|
|
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
|
|
|
|
# define ftrace_graph_active 0
|
2024-06-03 12:07:17 -07:00
|
|
|
/* No-op: without CONFIG_FUNCTION_GRAPH_TRACER there is no graph tracer
 * whose pid filtering could need updating. */
static inline void fgraph_update_pid_func(void) {}
|
2018-11-15 10:32:38 -07:00
|
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
|
|
|
|
|
|
|
#else /* !CONFIG_FUNCTION_TRACER */
|
|
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
|
|
|
|
|
|
|
#endif
|