
perf callchain: Fix stitch LBR memory leaks

The 'struct callchain_cursor_node' has a 'struct map_symbol' whose maps
and map members are reference counted. Ensure copies of these values
take their own references via the _get routines and release them with
map_symbol__exit().
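
As a minimal sketch of that pattern (the helper name below is
hypothetical and not part of the patch; maps__get(), map__get() and
map_symbol__exit() are perf's existing reference-counting routines):

```
/*
 * Hypothetical helper for illustration only: copying a
 * callchain_cursor_node duplicates its embedded 'struct map_symbol',
 * so the destination must drop whatever references it already holds
 * and take new ones for the copy.
 */
static void cursor_node_copy(struct callchain_cursor_node *dst,
			     const struct callchain_cursor_node *src)
{
	map_symbol__exit(&dst->ms);		/* release old maps/map references */
	*dst = *src;				/* shallow copy, ms pointers included */
	dst->ms.maps = maps__get(src->ms.maps);	/* new references owned by the copy */
	dst->ms.map = map__get(src->ms.map);
}
```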

Do the same for 'struct thread's prev_lbr_cursor, and save the size of
the prev_lbr_cursor array so that it can be iterated when the references
are released.
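
With the size recorded, the teardown can then walk the array and drop
each saved reference before freeing it, roughly as in the
thread__free_stitch_list() hunk further down:

```
/* Release the references held by every saved cursor node, then free
 * the array itself. */
for (unsigned int i = 0; i < lbr_stitch->prev_lbr_cursor_size; i++)
	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
zfree(&lbr_stitch->prev_lbr_cursor);
```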

Ensure that when stitch_nodes are placed on the free list the
map_symbols are exited.

Fix resolve_lbr_callchain_sample() by replacing list_replace_init()
with list_splice_init(), so the whole list is moved and nodes aren't
leaked.
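
The distinction matters because list_replace_init(&lbr_stitch->lists,
&lbr_stitch->free_lists) re-points free_lists at the nodes of lists and
re-initialises lists, silently dropping (and so leaking) any
stitch_nodes already queued on free_lists, while list_splice_init()
moves the nodes onto the existing free list. A commented sketch of the
fixed path, as in the resolve_lbr_callchain_sample() hunk below:

```
if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
	struct stitch_list *stitch_node;

	/* Drop the maps/map references each queued node holds ... */
	list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
		map_symbol__exit(&stitch_node->cursor.ms);

	/* ... then move all nodes onto free_lists, keeping any nodes
	 * already there for reuse. */
	list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
}
```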

With a leak sanitizer build of perf, the memory leaks can be reproduced
by running:

  ```
  $ perf record -e cycles --call-graph lbr perf test -w thloop
  $ perf report --stitch-lbr
  ```

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Fixes: ff165628d7 ("perf callchain: Stitch LBR call stack")
Signed-off-by: Ian Rogers <irogers@google.com>
[ Basic tests after applying the patch, repeating the example above ]
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Anne Macedo <retpolanne@posteo.net>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240808054644.1286065-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

 3 files changed, 20 insertions(+), 2 deletions(-)

tools/perf/util/machine.c

@@ -2270,8 +2270,12 @@ static void save_lbr_cursor_node(struct thread *thread,
 		cursor->curr = cursor->first;
 	else
 		cursor->curr = cursor->curr->next;
+
+	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
 	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
 	       sizeof(struct callchain_cursor_node));
+	lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
+	lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
 	lbr_stitch->prev_lbr_cursor[idx].valid = true;
 	cursor->pos++;
@@ -2482,6 +2486,9 @@ static bool has_stitched_lbr(struct thread *thread,
 		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
 		       sizeof(struct callchain_cursor_node));
 
+		stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
+		stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
+
 		if (callee)
 			list_add(&stitch_node->node, &lbr_stitch->lists);
 		else
@@ -2505,6 +2512,8 @@ static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
 	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
 		goto free_lbr_stitch;
 
+	thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
+
 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
@@ -2560,8 +2569,12 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
 						max_lbr, callee);
 
 		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
-			list_replace_init(&lbr_stitch->lists,
-					  &lbr_stitch->free_lists);
+			struct stitch_list *stitch_node;
+
+			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
+				map_symbol__exit(&stitch_node->cursor.ms);
+
+			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
 		}
 		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
 	}

tools/perf/util/thread.c

@@ -476,6 +476,7 @@ void thread__free_stitch_list(struct thread *thread)
 		return;
 
 	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
+		map_symbol__exit(&pos->cursor.ms);
 		list_del_init(&pos->node);
 		free(pos);
 	}
@@ -485,6 +486,9 @@ void thread__free_stitch_list(struct thread *thread)
 		free(pos);
 	}
 
+	for (unsigned int i = 0 ; i < lbr_stitch->prev_lbr_cursor_size; i++)
+		map_symbol__exit(&lbr_stitch->prev_lbr_cursor[i].ms);
+
 	zfree(&lbr_stitch->prev_lbr_cursor);
 	free(thread__lbr_stitch(thread));
 	thread__set_lbr_stitch(thread, NULL);

tools/perf/util/thread.h

@@ -26,6 +26,7 @@ struct lbr_stitch {
 	struct list_head		free_lists;
 	struct perf_sample		prev_sample;
 	struct callchain_cursor_node	*prev_lbr_cursor;
+	unsigned int prev_lbr_cursor_size;
 };
 
 DECLARE_RC_STRUCT(thread) {