Merge tag 'perf-urgent-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf event fixes from Ingo Molnar:
 "Left over from the v6.11 cycle:

   - Fix energy-pkg event enumeration on certain AMD CPUs

   - Set up the LBR branch stack for BPF counting events too"

* tag 'perf-urgent-2024-09-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel: Allow to setup LBR for counting event for BPF
  perf/x86/rapl: Fix the energy-pkg event for AMD CPUs

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 941c122da5
arch/x86/events/intel/core.c
@@ -3972,8 +3972,12 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		x86_pmu.pebs_aliases(event);
 	}
 
-	if (needs_branch_stack(event) && is_sampling_event(event))
-		event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+	if (needs_branch_stack(event)) {
+		/* Avoid branch stack setup for counting events in SAMPLE READ */
+		if (is_sampling_event(event) ||
+		    !(event->attr.sample_type & PERF_SAMPLE_READ))
+			event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK;
+	}
 
 	if (branch_sample_counters(event)) {
 		struct perf_event *leader, *sibling;
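The intel_pmu_hw_config() change above lets a counting (non-sampling) event still request branch-stack setup, which is what BPF's branch-snapshot path relies on. Below is a minimal userspace sketch of such an event; the hardware event choice and branch filter are illustrative assumptions, not taken from the patch, and opening it needs the usual perf_event_open() privileges.

/* Illustrative only: a counting event that still requests LBR data. */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_counting_lbr_event(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        /* No sample_period/sample_freq: this is a counting event. */
        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;

        /*
         * sample_type deliberately avoids PERF_SAMPLE_READ, matching the
         * new check above. Before this fix, NEEDS_BRANCH_STACK was only
         * set for sampling events, so the LBRs were never programmed for
         * an attr like this one.
         */
        return perf_event_open(&attr, 0, -1, -1, 0);
}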
arch/x86/events/rapl.c
@@ -103,6 +103,19 @@ static struct perf_pmu_events_attr event_attr_##v = {	\
 	.event_str	= str,						\
 };
 
+/*
+ * RAPL Package energy counter scope:
+ * 1. AMD/HYGON platforms have a per-PKG package energy counter
+ * 2. For Intel platforms
+ *	2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
+ *	2.2. Other Intel platforms are single die systems so the scope can be
+ *	     considered as either pkg-scope or die-scope, and we are considering
+ *	     them as die-scope.
+ */
+#define rapl_pmu_is_pkg_scope()				\
+	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
+	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+
 struct rapl_pmu {
 	raw_spinlock_t		lock;
 	int			n_active;
@@ -140,9 +153,25 @@ static unsigned int rapl_cntr_mask;
 static u64 rapl_timer_ms;
 static struct perf_msr *rapl_msrs;
 
+/*
+ * Helper functions to get the correct topology macros according to the
+ * RAPL PMU scope.
+ */
+static inline unsigned int get_rapl_pmu_idx(int cpu)
+{
+	return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
+					 topology_logical_die_id(cpu);
+}
+
+static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
+{
+	return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
+					 topology_die_cpumask(cpu);
+}
+
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
+	unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
 
 	/*
 	 * The unsigned check also catches the '-1' return value for non
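The new helpers switch between package-scope and die-scope topology lookups depending on the vendor. From userspace, the effective scope shows up in the cpumask the RAPL "power" PMU advertises: one CPU per package on AMD/HYGON, one per die on Intel. A small sketch for reading it (standard event_source sysfs layout; minimal error handling):

/* Sketch: print the CPUs the RAPL "power" PMU counts on. */
#include <stdio.h>

int main(void)
{
        char mask[256];
        FILE *f = fopen("/sys/bus/event_source/devices/power/cpumask", "r");

        if (!f) {
                perror("power/cpumask");
                return 1;
        }
        /* One CPU per package (AMD/HYGON) or per die (Intel). */
        if (fgets(mask, sizeof(mask), f))
                printf("RAPL events are counted on CPUs: %s", mask);
        fclose(f);
        return 0;
}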
@@ -552,7 +581,7 @@ static int rapl_cpu_offline(unsigned int cpu)
 
 	pmu->cpu = -1;
 	/* Find a new cpu to collect rapl events */
-	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+	target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);
 
 	/* Migrate rapl events to the new target */
 	if (target < nr_cpu_ids) {
@@ -565,6 +594,11 @@ static int rapl_cpu_offline(unsigned int cpu)
 
 static int rapl_cpu_online(unsigned int cpu)
 {
+	s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+	if (rapl_pmu_idx < 0) {
+		pr_err("topology_logical_(package/die)_id() returned a negative value");
+		return -EINVAL;
+	}
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
@@ -579,14 +613,14 @@ static int rapl_cpu_online(unsigned int cpu)
 		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
 		rapl_hrtimer_init(pmu);
 
-		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
+		rapl_pmus->pmus[rapl_pmu_idx] = pmu;
 	}
 
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
 	 */
-	target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
+	target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
 	if (target < nr_cpu_ids)
 		return 0;
 
@@ -675,7 +709,10 @@ static const struct attribute_group *rapl_attr_update[] = {
 
 static int __init init_rapl_pmus(void)
 {
-	int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
+	int nr_rapl_pmu = topology_max_packages();
+
+	if (!rapl_pmu_is_pkg_scope())
+		nr_rapl_pmu *= topology_max_dies_per_package();
 
 	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
 	if (!rapl_pmus)
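With the PMU array sized per package on AMD/HYGON (and per die elsewhere), the energy-pkg event enumerates again on the affected AMD systems. As a usage sketch, the counter can be opened through the dynamic "power" PMU; the sysfs paths below are the standard perf event_source layout, CPU 0 is assumed to be in the PMU's cpumask, and the call needs root or a relaxed perf_event_paranoid setting:

/* Sketch: open and read the energy-pkg counter via the "power" PMU. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        unsigned int type, config;
        uint64_t count;
        FILE *f;
        int fd;

        /* The PMU type and event encoding are read from the kernel,
         * not hard-coded. */
        f = fopen("/sys/bus/event_source/devices/power/type", "r");
        if (!f || fscanf(f, "%u", &type) != 1)
                return 1;
        fclose(f);

        f = fopen("/sys/bus/event_source/devices/power/events/energy-pkg", "r");
        if (!f || fscanf(f, "event=%x", &config) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = config;

        /* RAPL events are system-wide: pid == -1, bound to one CPU. */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
                return 1;
        sleep(1);
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("energy-pkg raw count: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}

The raw count is in PMU units; multiplying by the events/energy-pkg.scale sysfs attribute converts it to Joules.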