perf bpf-filter: Support pin/unpin BPF object
Use the pinned objects so that unprivileged users can profile their own tasks. The BPF objects need to be pinned in the BPF-fs by root first; that will be handled in a later patch.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20240703223035.2024586-5-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Commit 0715f65e94 (parent eb1693b115)
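As a rough sketch of the intended workflow (not part of this patch): a privileged setup step can call the new perf_bpf_filter__pin()/perf_bpf_filter__unpin() API directly, while the actual perf-record command-line wiring arrives in a later patch of the series. Everything below except those two functions is hypothetical scaffolding.

/* Hypothetical root-only setup tool built on the API added here.
 * Only perf_bpf_filter__pin() and perf_bpf_filter__unpin() come from
 * util/bpf-filter.h; the main() wrapper is illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "util/bpf-filter.h"

int main(int argc, char **argv)
{
	if (geteuid() != 0) {
		fprintf(stderr, "pinning the filter objects requires root\n");
		return 1;
	}

	if (argc > 1 && !strcmp(argv[1], "unpin"))
		return perf_bpf_filter__unpin() ? 1 : 0;

	/* load the sample filter, pin it under <bpffs>/perf_filter/ and
	 * relax permissions so unprivileged users can attach it
	 */
	return perf_bpf_filter__pin() ? 1 : 0;
}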
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -1,5 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
 
 #include <bpf/bpf.h>
 #include <linux/err.h>
@@ -23,6 +26,9 @@
 #define __PERF_SAMPLE_TYPE(tt, st, opt) { tt, #st, opt }
 #define PERF_SAMPLE_TYPE(_st, opt) __PERF_SAMPLE_TYPE(PBF_TERM_##_st, PERF_SAMPLE_##_st, opt)
 
+/* Index in the pinned 'filters' map. Should be released after use. */
+static int pinned_filter_idx = -1;
+
 static const struct perf_sample_info {
 	enum perf_bpf_filter_term type;
 	const char *name;
@@ -47,6 +53,8 @@ static const struct perf_sample_info {
 	PERF_SAMPLE_TYPE(DATA_PAGE_SIZE, "--data-page-size"),
 };
 
+static int get_pinned_fd(const char *name);
+
 static const struct perf_sample_info *get_sample_info(enum perf_bpf_filter_term type)
 {
 	size_t i;
@@ -167,19 +175,26 @@ static int convert_to_tgid(int tid)
 	return tgid;
 }
 
-static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
-			   struct perf_bpf_filter_entry *entry)
+static int update_pid_hash(struct evsel *evsel, struct perf_bpf_filter_entry *entry)
 {
 	int filter_idx;
-	int nr, last;
-	int fd = bpf_map__fd(skel->maps.filters);
+	int fd, nr, last;
 	struct perf_thread_map *threads;
 
+	fd = get_pinned_fd("filters");
+	if (fd < 0) {
+		pr_debug("cannot get fd for 'filters' map\n");
+		return fd;
+	}
+
 	/* Find the first available entry in the filters map */
 	for (filter_idx = 0; filter_idx < MAX_FILTERS; filter_idx++) {
-		if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0)
+		if (bpf_map_update_elem(fd, &filter_idx, entry, BPF_NOEXIST) == 0) {
+			pinned_filter_idx = filter_idx;
 			break;
+		}
 	}
+	close(fd);
 
 	if (filter_idx == MAX_FILTERS) {
 		pr_err("Too many users for the filter map\n");
@@ -193,7 +208,9 @@ static int update_pid_hash(struct sample_filter_bpf *skel, struct evsel *evsel,
 	}
 
 	/* save the index to a hash map */
-	fd = bpf_map__fd(skel->maps.pid_hash);
+	fd = get_pinned_fd("pid_hash");
+	if (fd < 0)
+		return fd;
 
 	last = -1;
 	nr = perf_thread_map__nr(threads);
@@ -214,10 +231,12 @@
 
 		if (bpf_map_update_elem(fd, &tgid, &filter_idx, BPF_ANY) < 0) {
 			pr_err("Failed to update the pid hash\n");
-			return -errno;
+			close(fd);
+			return -1;
 		}
 		pr_debug("pid hash: %d -> %d\n", tgid, filter_idx);
 	}
+	close(fd);
 	return 0;
 }
 
@@ -240,40 +259,48 @@ int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
 		goto err;
 	}
 
-	skel = sample_filter_bpf__open();
-	if (!skel) {
-		pr_err("Failed to open perf sample-filter BPF skeleton\n");
-		ret = -EPERM;
-		goto err;
-	}
-
-	if (needs_pid_hash) {
-		bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
-		bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
-		skel->rodata->use_pid_hash = 1;
-	}
-
-	if (sample_filter_bpf__load(skel) < 0) {
-		pr_err("Failed to load perf sample-filter BPF skeleton\n");
-		ret = -EPERM;
-		goto err;
-	}
-
-	if (needs_pid_hash) {
+	if (needs_pid_hash && geteuid() != 0) {
 		/* The filters map is shared among other processes */
-		ret = update_pid_hash(skel, evsel, entry);
+		ret = update_pid_hash(evsel, entry);
 		if (ret < 0)
 			goto err;
-	} else {
-		i = 0;
-		fd = bpf_map__fd(skel->maps.filters);
-
-		/* The filters map has only one entry in this case */
-		if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
-			ret = -errno;
-			pr_err("Failed to update the filter map\n");
-			goto err;
-		}
+
+		fd = get_pinned_fd("perf_sample_filter");
+		if (fd < 0) {
+			ret = fd;
+			goto err;
+		}
+
+		for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
+			for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
+				ret = ioctl(FD(evsel, x, y), PERF_EVENT_IOC_SET_BPF, fd);
+				if (ret < 0) {
+					pr_err("Failed to attach perf sample-filter\n");
+					goto err;
+				}
+			}
+		}
+
+		close(fd);
+		free(entry);
+		return 0;
+	}
+
+	skel = sample_filter_bpf__open_and_load();
+	if (!skel) {
+		ret = -errno;
+		pr_err("Failed to load perf sample-filter BPF skeleton\n");
+		goto err;
+	}
+
+	i = 0;
+	fd = bpf_map__fd(skel->maps.filters);
+
+	/* The filters map has only one entry in this case */
+	if (bpf_map_update_elem(fd, &i, entry, BPF_ANY) < 0) {
+		ret = -errno;
+		pr_err("Failed to update the filter map\n");
+		goto err;
 	}
 
 	prog = skel->progs.perf_sample_filter;
@@ -306,6 +333,15 @@ int perf_bpf_filter__destroy(struct evsel *evsel)
 		free(expr);
 	}
 	sample_filter_bpf__destroy(evsel->bpf_skel);
+
+	if (pinned_filter_idx >= 0) {
+		int fd = get_pinned_fd("filters");
+
+		bpf_map_delete_elem(fd, &pinned_filter_idx);
+		pinned_filter_idx = -1;
+		close(fd);
+	}
+
 	return 0;
 }
 
@@ -349,3 +385,129 @@ int perf_bpf_filter__parse(struct list_head *expr_head, const char *str)
 
 	return ret;
 }
+
+int perf_bpf_filter__pin(void)
+{
+	struct sample_filter_bpf *skel;
+	char *path = NULL;
+	int dir_fd, ret = -1;
+
+	skel = sample_filter_bpf__open();
+	if (!skel) {
+		ret = -errno;
+		pr_err("Failed to open perf sample-filter BPF skeleton\n");
+		goto err;
+	}
+
+	/* pinned program will use pid-hash */
+	bpf_map__set_max_entries(skel->maps.filters, MAX_FILTERS);
+	bpf_map__set_max_entries(skel->maps.pid_hash, MAX_PIDS);
+	skel->rodata->use_pid_hash = 1;
+
+	if (sample_filter_bpf__load(skel) < 0) {
+		ret = -errno;
+		pr_err("Failed to load perf sample-filter BPF skeleton\n");
+		goto err;
+	}
+
+	if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+		     PERF_BPF_FILTER_PIN_PATH) < 0) {
+		ret = -errno;
+		pr_err("Failed to allocate pathname in the BPF-fs\n");
+		goto err;
+	}
+
+	ret = bpf_object__pin(skel->obj, path);
+	if (ret < 0) {
+		pr_err("Failed to pin BPF filter objects\n");
+		goto err;
+	}
+
+	/* setup access permissions for the pinned objects */
+	dir_fd = open(path, O_PATH);
+	if (dir_fd < 0) {
+		bpf_object__unpin(skel->obj, path);
+		ret = dir_fd;
+		goto err;
+	}
+
+	/* BPF-fs root has the sticky bit */
+	if (fchmodat(dir_fd, "..", 01755, 0) < 0) {
+		pr_debug("chmod for BPF-fs failed\n");
+		ret = -errno;
+		goto err_close;
+	}
+
+	/* perf_filter directory */
+	if (fchmodat(dir_fd, ".", 0755, 0) < 0) {
+		pr_debug("chmod for perf_filter directory failed?\n");
+		ret = -errno;
+		goto err_close;
+	}
+
+	/* programs need write permission for some reason */
+	if (fchmodat(dir_fd, "perf_sample_filter", 0777, 0) < 0) {
+		pr_debug("chmod for perf_sample_filter failed\n");
+		ret = -errno;
+	}
+	/* maps */
+	if (fchmodat(dir_fd, "filters", 0666, 0) < 0) {
+		pr_debug("chmod for filters failed\n");
+		ret = -errno;
+	}
+	if (fchmodat(dir_fd, "pid_hash", 0666, 0) < 0) {
+		pr_debug("chmod for pid_hash failed\n");
+		ret = -errno;
+	}
+
+err_close:
+	close(dir_fd);
+
+err:
+	free(path);
+	sample_filter_bpf__destroy(skel);
+	return ret;
+}
+
+int perf_bpf_filter__unpin(void)
+{
+	struct sample_filter_bpf *skel;
+	char *path = NULL;
+	int ret = -1;
+
+	skel = sample_filter_bpf__open_and_load();
+	if (!skel) {
+		ret = -errno;
+		pr_err("Failed to open perf sample-filter BPF skeleton\n");
+		goto err;
+	}
+
+	if (asprintf(&path, "%s/fs/bpf/%s", sysfs__mountpoint(),
+		     PERF_BPF_FILTER_PIN_PATH) < 0) {
+		ret = -errno;
+		pr_err("Failed to allocate pathname in the BPF-fs\n");
+		goto err;
+	}
+
+	ret = bpf_object__unpin(skel->obj, path);
+
+err:
+	free(path);
+	sample_filter_bpf__destroy(skel);
+	return ret;
+}
+
+static int get_pinned_fd(const char *name)
+{
+	char *path = NULL;
+	int fd;
+
+	if (asprintf(&path, "%s/fs/bpf/%s/%s", sysfs__mountpoint(),
+		     PERF_BPF_FILTER_PIN_PATH, name) < 0)
+		return -1;
+
+	fd = bpf_obj_get(path);
+
+	free(path);
+	return fd;
+}
--- a/tools/perf/util/bpf-filter.h
+++ b/tools/perf/util/bpf-filter.h
@@ -18,6 +18,9 @@ struct perf_bpf_filter_expr {
 struct evsel;
 struct target;
 
+/* path in BPF-fs for the pinned program and maps */
+#define PERF_BPF_FILTER_PIN_PATH  "perf_filter"
+
 #ifdef HAVE_BPF_SKEL
 struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
 						       int part,
@@ -27,6 +30,8 @@ int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
 int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
 int perf_bpf_filter__destroy(struct evsel *evsel);
 u64 perf_bpf_filter__lost_count(struct evsel *evsel);
+int perf_bpf_filter__pin(void);
+int perf_bpf_filter__unpin(void);
 
 #else /* !HAVE_BPF_SKEL */
 
@@ -48,5 +53,13 @@ static inline u64 perf_bpf_filter__lost_count(struct evsel *evsel __maybe_unused
 {
 	return 0;
 }
+static inline int perf_bpf_filter__pin(void)
+{
+	return -EOPNOTSUPP;
+}
+static inline int perf_bpf_filter__unpin(void)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* HAVE_BPF_SKEL*/
 #endif /* PERF_UTIL_BPF_FILTER_H */
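For readers tracing the flow, the unprivileged branch added to perf_bpf_filter__prepare() boils down to: open the program that root pinned under BPF-fs and attach it to the already-opened perf event. A condensed, self-contained sketch follows; the hard-coded /sys/fs/bpf path and the perf_event_fd parameter are assumptions for illustration, since perf itself resolves the mount point via sysfs__mountpoint() and walks the evsel's fds.

/* Illustrative only: mirrors the unprivileged path in perf_bpf_filter__prepare().
 * Assumes BPF-fs is mounted at /sys/fs/bpf and perf_event_fd is a perf event
 * opened by the caller.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>

static int attach_pinned_filter(int perf_event_fd)
{
	/* program pinned by root via perf_bpf_filter__pin() */
	int prog_fd = bpf_obj_get("/sys/fs/bpf/perf_filter/perf_sample_filter");

	if (prog_fd < 0) {
		perror("bpf_obj_get");
		return -1;
	}

	/* ask the kernel to run the pinned BPF program on each sample */
	if (ioctl(perf_event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		perror("PERF_EVENT_IOC_SET_BPF");
		close(prog_fd);
		return -1;
	}

	close(prog_fd);
	return 0;
}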