perf lock contention: Use lock_stat_find{,new}
This is preparation work to support complex keys of BPF maps. Now it has a single-value key according to the aggregation mode, like stack_id or pid. But we want to use a combination of those keys. Then lock_contention_read() should still aggregate the result based on the key that was requested by the user. The other key info will be used for filtering. So instead of always creating a lock_stat entry, check if it's already there using lock_stat_find() first. Signed-off-by: Namhyung Kim <namhyung@kernel.org> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Hao Luo <haoluo@google.com> Cc: Ian Rogers <irogers@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Juri Lelli <juri.lelli@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Song Liu <song@kernel.org> Cc: bpf@vger.kernel.org Link: https://lore.kernel.org/r/20230203021324.143540-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
parent
492fef218a
commit
16cad1d359
@ -465,7 +465,7 @@ static struct lock_stat *pop_from_result(void)
|
||||
return container_of(node, struct lock_stat, rb);
|
||||
}
|
||||
|
||||
static struct lock_stat *lock_stat_find(u64 addr)
|
||||
struct lock_stat *lock_stat_find(u64 addr)
|
||||
{
|
||||
struct hlist_head *entry = lockhashentry(addr);
|
||||
struct lock_stat *ret;
|
||||
@ -477,7 +477,7 @@ static struct lock_stat *lock_stat_find(u64 addr)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
|
||||
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
|
||||
{
|
||||
struct hlist_head *entry = lockhashentry(addr);
|
||||
struct lock_stat *ret, *new;
|
||||
|
@ -154,7 +154,10 @@ perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
|
||||
|
||||
ifeq ($(CONFIG_LIBTRACEEVENT),y)
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_lock_contention.o
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_LIBTRACEEVENT),y)
|
||||
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
|
||||
|
@ -256,12 +256,34 @@ int lock_contention_read(struct lock_contention *con)
|
||||
prev_key = NULL;
|
||||
while (!bpf_map_get_next_key(fd, prev_key, &key)) {
|
||||
s32 stack_id;
|
||||
const char *name;
|
||||
|
||||
/* to handle errors in the loop body */
|
||||
err = -1;
|
||||
|
||||
bpf_map_lookup_elem(fd, &key, &data);
|
||||
st = zalloc(sizeof(*st));
|
||||
|
||||
if (con->save_callstack) {
|
||||
stack_id = key.aggr_key;
|
||||
bpf_map_lookup_elem(stack, &stack_id, stack_trace);
|
||||
}
|
||||
|
||||
st = lock_stat_find(key.aggr_key);
|
||||
if (st != NULL) {
|
||||
st->wait_time_total += data.total_time;
|
||||
if (st->wait_time_max < data.max_time)
|
||||
st->wait_time_max = data.max_time;
|
||||
if (st->wait_time_min > data.min_time)
|
||||
st->wait_time_min = data.min_time;
|
||||
|
||||
st->nr_contended += data.count;
|
||||
if (st->nr_contended)
|
||||
st->avg_wait_time = st->wait_time_total / st->nr_contended;
|
||||
goto next;
|
||||
}
|
||||
|
||||
name = lock_contention_get_name(con, &key, stack_trace);
|
||||
st = lock_stat_findnew(key.aggr_key, name, data.flags);
|
||||
if (st == NULL)
|
||||
break;
|
||||
|
||||
@ -274,14 +296,6 @@ int lock_contention_read(struct lock_contention *con)
|
||||
st->avg_wait_time = data.total_time / data.count;
|
||||
|
||||
st->flags = data.flags;
|
||||
st->addr = key.aggr_key;
|
||||
|
||||
stack_id = key.aggr_key;
|
||||
bpf_map_lookup_elem(stack, &stack_id, stack_trace);
|
||||
|
||||
st->name = strdup(lock_contention_get_name(con, &key, stack_trace));
|
||||
if (st->name == NULL)
|
||||
break;
|
||||
|
||||
if (con->save_callstack) {
|
||||
st->callstack = memdup(stack_trace, stack_size);
|
||||
@ -289,19 +303,14 @@ int lock_contention_read(struct lock_contention *con)
|
||||
break;
|
||||
}
|
||||
|
||||
hlist_add_head(&st->hash_entry, con->result);
|
||||
next:
|
||||
prev_key = &key;
|
||||
|
||||
/* we're fine now, reset the values */
|
||||
st = NULL;
|
||||
/* we're fine now, reset the error */
|
||||
err = 0;
|
||||
}
|
||||
|
||||
free(stack_trace);
|
||||
if (st) {
|
||||
free(st->name);
|
||||
free(st);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -65,6 +65,9 @@ struct lock_stat {
|
||||
*/
|
||||
#define MAX_LOCK_DEPTH 48
|
||||
|
||||
struct lock_stat *lock_stat_find(u64 addr);
|
||||
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);
|
||||
|
||||
/*
|
||||
* struct lock_seq_stat:
|
||||
* Place to put on state of one lock sequence
|
||||
|
Loading…
Reference in New Issue
Block a user