
libperf cpumap: Rename perf_cpu_map__empty() to perf_cpu_map__has_any_cpu_or_is_empty()

The name perf_cpu_map__empty() is misleading, as true is also
returned when the map contains the "any CPU"/dummy value.

Rename it to perf_cpu_map__has_any_cpu_or_is_empty(); later changes
will (re)introduce perf_cpu_map__empty() and perf_cpu_map__has_any_cpu().
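
To illustrate the behavior that motivated the rename, here is a minimal
caller-side sketch. It is not part of this patch: it assumes a libperf
with the new name installed, and uses the "any CPU" map constructor,
spelled perf_cpu_map__new_any_cpu() in current libperf and
perf_cpu_map__dummy_new() in older releases:

    #include <perf/cpumap.h>
    #include <stdio.h>

    int main(void)
    {
            /* A map holding only the "any CPU"/dummy value (-1). */
            struct perf_cpu_map *any = perf_cpu_map__new_any_cpu();
            /* NULL is treated as an empty map by the query below. */
            struct perf_cpu_map *empty = NULL;

            /*
             * Both lines print 1. The old name, perf_cpu_map__empty(),
             * returned true for the dummy map even though it holds one
             * entry, which is what made the name misleading.
             */
            printf("%d\n", perf_cpu_map__has_any_cpu_or_is_empty(any));
            printf("%d\n", perf_cpu_map__has_any_cpu_or_is_empty(empty));

            perf_cpu_map__put(any);
            return 0;
    }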

Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: André Almeida <andrealmeid@igalia.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Atish Patra <atishp@rivosinc.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: bpf@vger.kernel.org
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20231129060211.1890454-4-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Commit: 923ca62a7b (parent: 8f60f870a9)
Author: Ian Rogers, 2023-11-28 22:02:00 -08:00; committed by Arnaldo Carvalho de Melo

14 changed files with 29 additions and 29 deletions

tools/lib/perf/Documentation/libperf.txt

@@ -46,7 +46,7 @@ SYNOPSIS
   void perf_cpu_map__put(struct perf_cpu_map *map);
   int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
   int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
-  bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+  bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
   int perf_cpu_map__max(struct perf_cpu_map *map);
   bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);

tools/lib/perf/cpumap.c

@@ -311,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
 	return cpus ? __perf_cpu_map__nr(cpus) : 1;
 }
 
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
 {
 	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
 }

tools/lib/perf/evlist.c

@@ -619,7 +619,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
 	/* One for each CPU */
 	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
-	if (perf_cpu_map__empty(evlist->all_cpus)) {
+	if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
 		/* Plus one for each thread */
 		nr_mmaps += perf_thread_map__nr(evlist->threads);
 		/* Minus the per-thread CPU (-1) */

@@ -653,7 +653,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
 		return -ENOMEM;
 
-	if (perf_cpu_map__empty(cpus))
+	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
 		return mmap_per_thread(evlist, ops, mp);
 
 	return mmap_per_cpu(evlist, ops, mp);

tools/lib/perf/include/perf/cpumap.h

@@ -47,9 +47,9 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
 /**
- * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
  */
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
 LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
 LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
 LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,

tools/lib/perf/libperf.map

@@ -9,7 +9,7 @@ LIBPERF_0.0.1 {
 		perf_cpu_map__read;
 		perf_cpu_map__nr;
 		perf_cpu_map__cpu;
-		perf_cpu_map__empty;
+		perf_cpu_map__has_any_cpu_or_is_empty;
 		perf_cpu_map__max;
 		perf_cpu_map__has;
 		perf_thread_map__new_array;

tools/perf/arch/arm/util/cs-etm.c

@@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
 		 * program can run on any CPUs in this case, thus don't skip
 		 * validation.
 		 */
-		if (!perf_cpu_map__empty(event_cpus) &&
+		if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
 		    !perf_cpu_map__has(event_cpus, cpu))
 			continue;

@@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 	 * Also the case of per-cpu mmaps, need the contextID in order to be notified
 	 * when a context switch happened.
 	 */
-	if (!perf_cpu_map__empty(cpus)) {
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
 		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
 					   "timestamp", 1);
 		evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,

@@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
 		evsel->core.attr.sample_period = 1;
 
 	/* In per-cpu case, always need the time of mmap events etc */
-	if (!perf_cpu_map__empty(cpus))
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
 		evsel__set_sample_bit(evsel, TIME);
 
 	err = cs_etm_validate_config(itr, cs_etm_evsel);

@@ -539,7 +539,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
 	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
 
 	/* cpu map is not empty, we have specific CPUs to work with */
-	if (!perf_cpu_map__empty(event_cpus)) {
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
 		for (i = 0; i < cpu__max_cpu().cpu; i++) {
 			struct perf_cpu cpu = { .cpu = i, };

@@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 		return -EINVAL;
 
 	/* If the cpu_map is empty all online CPUs are involved */
-	if (perf_cpu_map__empty(event_cpus)) {
+	if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
 		cpu_map = online_cpus;
 	} else {
 		/* Make sure all specified CPUs are online */

tools/perf/arch/arm64/util/arm-spe.c

@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 	 * In the case of per-cpu mmaps, sample CPU for AUX event;
 	 * also enable the timestamp tracing for samples correlation.
 	 */
-	if (!perf_cpu_map__empty(cpus)) {
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
 		evsel__set_sample_bit(arm_spe_evsel, CPU);
 		evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
 					   "ts_enable", 1);

@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 	tracking_evsel->core.attr.sample_period = 1;
 
 	/* In per-cpu case, always need the time of mmap events etc */
-	if (!perf_cpu_map__empty(cpus)) {
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
 		evsel__set_sample_bit(tracking_evsel, TIME);
 		evsel__set_sample_bit(tracking_evsel, CPU);

tools/perf/arch/x86/util/intel-bts.c

@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 	if (!opts->full_auxtrace)
 		return 0;
 
-	if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
+	if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
 		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
 		return -EINVAL;
 	}

@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!perf_cpu_map__empty(cpus))
+		if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
 			evsel__set_sample_bit(intel_bts_evsel, CPU);
 	}

tools/perf/arch/x86/util/intel-pt.c

@@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
 			ui__warning("Intel Processor Trace: TSC not available\n");
 	}
 
-	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
+	per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
 
 	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
 	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;

@@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Per-cpu recording needs sched_switch events to distinguish different
 	 * threads.
 	 */
-	if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+	if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
 	    !record_opts__no_switch_events(opts)) {
 		if (perf_can_record_switch_events()) {
 			bool cpu_wide = !target__none(&opts->target) &&

@@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		 * In the case of per-cpu mmaps, we need the CPU on the
 		 * AUX event.
 		 */
-		if (!perf_cpu_map__empty(cpus))
+		if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
 			evsel__set_sample_bit(intel_pt_evsel, CPU);
 	}

@@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 		tracking_evsel->immediate = true;
 
 		/* In per-cpu case, always need the time of mmap events etc */
-		if (!perf_cpu_map__empty(cpus)) {
+		if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
 			evsel__set_sample_bit(tracking_evsel, TIME);
 			/* And the CPU for switch events */
 			evsel__set_sample_bit(tracking_evsel, CPU);

@@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 	 * Warn the user when we do not have enough information to decode i.e.
 	 * per-cpu with no sched_switch (except workload-only).
 	 */
-	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
+	if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
 	    !target__none(&opts->target) &&
 	    !intel_pt_evsel->core.attr.exclude_user)
 		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

tools/perf/builtin-c2c.c

@@ -2320,7 +2320,7 @@ static int setup_nodes(struct perf_session *session)
 		nodes[node] = set;
 
 		/* empty node, skip */
-		if (perf_cpu_map__empty(map))
+		if (perf_cpu_map__has_any_cpu_or_is_empty(map))
 			continue;
 
 		perf_cpu_map__for_each_cpu(cpu, idx, map) {

tools/perf/builtin-stat.c

@@ -1316,7 +1316,7 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
 	 * be the first online CPU in the cache domain else use the
 	 * first online CPU of the cache domain as the ID.
 	 */
-	if (perf_cpu_map__empty(cpu_map))
+	if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map))
 		id = cpu.cpu;
 	else
 		id = perf_cpu_map__cpu(cpu_map, 0).cpu;

@@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void)
 	 * taking the highest cpu number to be the size of
 	 * the aggregation translate cpumap.
 	 */
-	if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
+	if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
 		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
 	else
 		nr = 0;

@@ -2289,7 +2289,7 @@ int process_stat_config_event(struct perf_session *session,
 	perf_event__read_stat_config(&stat_config, &event->stat_config);
 
-	if (perf_cpu_map__empty(st->cpus)) {
+	if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) {
 		if (st->aggr_mode != AGGR_UNSET)
 			pr_warning("warning: processing task data, aggregation mode not set\n");
 	} else if (st->aggr_mode != AGGR_UNSET) {

tools/perf/util/auxtrace.c

@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 				   struct evlist *evlist,
 				   struct evsel *evsel, int idx)
 {
-	bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+	bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
 	mp->mmap_needed = evsel->needs_auxtrace_mmap;

@@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
 {
-	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+	bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
 
 	if (per_cpu_mmaps) {
 		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);

tools/perf/util/record.c

@@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 	evsel = evlist__last(temp_evlist);
 
-	if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
+	if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		if (cpus)

tools/perf/util/stat.c

@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
 	if (!counter->per_pkg)
 		return 0;
 
-	if (perf_cpu_map__empty(cpus))
+	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
 		return 0;
 
 	if (!mask) {