diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 7a4297d8fd2c..06d1c4018407 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -40,13 +40,17 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 	if (ftrace->target.cpu_list) {
 		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
 		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+		skel->rodata->has_cpu = 1;
 	}
 
 	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
 		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
 		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+		skel->rodata->has_task = 1;
 	}
 
+	skel->rodata->use_nsec = ftrace->use_nsec;
+
 	set_max_rlimit();
 
 	err = func_latency_bpf__load(skel);
@@ -59,7 +63,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		u32 cpu;
 		u8 val = 1;
 
-		skel->bss->has_cpu = 1;
 		fd = bpf_map__fd(skel->maps.cpu_filter);
 
 		for (i = 0; i < ncpus; i++) {
@@ -72,7 +75,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		u32 pid;
 		u8 val = 1;
 
-		skel->bss->has_task = 1;
 		fd = bpf_map__fd(skel->maps.task_filter);
 
 		for (i = 0; i < ntasks; i++) {
@@ -81,8 +83,6 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
 		}
 	}
 
-	skel->bss->use_nsec = ftrace->use_nsec;
-
 	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
 							    false, func->name);
 	if (IS_ERR(skel->links.func_begin)) {
diff --git a/tools/perf/util/bpf_skel/func_latency.bpf.c b/tools/perf/util/bpf_skel/func_latency.bpf.c
index 9d01e3af7479..f613dc9cb123 100644
--- a/tools/perf/util/bpf_skel/func_latency.bpf.c
+++ b/tools/perf/util/bpf_skel/func_latency.bpf.c
@@ -37,9 +37,10 @@ struct {
 
 
 int enabled = 0;
-int has_cpu = 0;
-int has_task = 0;
-int use_nsec = 0;
+
+const volatile int has_cpu = 0;
+const volatile int has_task = 0;
+const volatile int use_nsec = 0;
 
 SEC("kprobe/func")
 int BPF_PROG(func_begin)
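
Background note (not part of the patch): libbpf places `const volatile` globals in the skeleton's `.rodata` map, so they must be written via `skel->rodata->...` before `*_bpf__load()`; after load the map is frozen and the verifier can treat the values as constants, unlike plain globals in `.bss` which are set after load. A minimal sketch of the userspace pattern the patch switches to, using a hypothetical `example_bpf` skeleton (not part of this patch):

```c
/* Illustrative only: "example_bpf" / "example.skel.h" are hypothetical
 * names. The open -> set rodata -> load ordering mirrors what the patch
 * does in perf_ftrace__latency_prepare_bpf().
 */
#include "example.skel.h"

static struct example_bpf *setup(int use_nsec)
{
	struct example_bpf *skel;

	skel = example_bpf__open();
	if (!skel)
		return NULL;

	/* .rodata values must be set before load; libbpf freezes the
	 * map afterwards, so later writes would not reach the program.
	 */
	skel->rodata->use_nsec = use_nsec;

	if (example_bpf__load(skel)) {
		example_bpf__destroy(skel);
		return NULL;
	}

	return skel;
}
```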