/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_MAPS_H
#define __PERF_MAPS_H
#include <linux/refcount.h>
#include <stdio.h>
#include <stdbool.h>
#include <linux/types.h>
struct ref_reloc_sym;
struct machine;
struct map;
struct maps;
#define KMAP_NAME_LEN 256
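/*
 * Extra information kept for kernel maps: the symbol used to compute
 * kernel relocation, a back pointer to the kernel maps this map belongs
 * to and the map name (for example a module name).
 */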
struct kmap {
struct ref_reloc_sym *ref_reloc_sym;
struct maps *kmaps;
char name[KMAP_NAME_LEN];
};
struct maps *maps__new(struct machine *machine);
bool maps__empty(struct maps *maps);
int maps__copy_from(struct maps *maps, struct maps *parent);

/*
 * Accessor functions for struct maps; they reduce the number of places
 * where explicit reference counting is necessary.
 */
struct maps *maps__get(struct maps *maps);
void maps__put(struct maps *maps);
static inline void __maps__zput(struct maps **map)
{
maps__put(*map);
*map = NULL;
}
#define maps__zput(map) __maps__zput(&map)
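
/*
 * Minimal reference counting sketch (other_maps is a hypothetical pointer
 * from elsewhere): maps__get() takes a new reference, maps__put() drops it,
 * and maps__zput() additionally NULLs the local pointer so it cannot be
 * used after the put:
 *
 *	struct maps *maps = maps__get(other_maps);
 *
 *	... use maps ...
 *
 *	maps__zput(maps);
 */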
bool maps__equal(struct maps *a, struct maps *b);
/* Iterate over the maps, calling cb for each entry. */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
/* Iterate over the maps, removing an entry if cb returns true. */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data);
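
/*
 * Sketch of a maps__for_each_map() callback (print_map_cb is illustrative
 * only, assuming a non-zero return from cb stops the iteration):
 *
 *	static int print_map_cb(struct map *map, void *data)
 *	{
 *		FILE *fp = data;
 *
 *		fprintf(fp, "map %p\n", (void *)map);
 *		return 0;
 *	}
 *
 *	...
 *	maps__for_each_map(maps, print_map_cb, stdout);
 */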
struct machine *maps__machine(const struct maps *maps);
unsigned int maps__nr_maps(const struct maps *maps); /* Test only. */
refcount_t *maps__refcnt(struct maps *maps); /* Test only. */
#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps);
void maps__set_addr_space(struct maps *maps, void *addr_space);
const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps);
void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops);
#endif
size_t maps__fprintf(struct maps *maps, FILE *fp);
int maps__insert(struct maps *maps, struct map *map);
void maps__remove(struct maps *maps, struct map *map);

/*
 * Find the map containing addr. Maps are held in an array lazily sorted by
 * start address: a find may first need to sort the array with qsort
 * (O(n*log n)), but in the common sorted case the lookup is O(log n).
 */
struct map *maps__find(struct maps *maps, u64 addr);
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp);
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp);
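
/*
 * Address to symbol lookup sketch (sample_addr is a hypothetical input,
 * and it is assumed the caller owns the map reference returned via mapp
 * and must drop it with map__put()):
 *
 *	struct map *map = NULL;
 *	struct symbol *sym = maps__find_symbol(maps, sample_addr, &map);
 *
 *	if (sym != NULL)
 *		printf("%s\n", sym->name);
 *	map__put(map);
 */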
struct addr_map_symbol;
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams);
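/* Split overlapping existing mappings, then insert the new mapping. */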
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new);
struct map *maps__find_by_name(struct maps *maps, const char *name);
struct map *maps__find_next_entry(struct maps *maps, struct map *map);
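/* Split new_map, inserting the pieces that fill gaps in the kernel maps. */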
int maps__merge_in(struct maps *kmaps, struct map *new_map);
void maps__fixup_end(struct maps *maps);
void maps__load_first(struct maps *maps);
#endif // __PERF_MAPS_H