2019-05-19 05:08:20 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2013-11-12 09:58:50 -07:00
|
|
|
/*
|
2020-05-27 15:46:55 -07:00
|
|
|
* Support Intel/AMD RAPL energy consumption counters
|
2013-11-12 09:58:50 -07:00
|
|
|
* Copyright (C) 2013 Google, Inc., Stephane Eranian
|
|
|
|
*
|
|
|
|
* Intel RAPL interface is specified in the IA-32 Manual Vol3b
|
|
|
|
* section 14.7.1 (September 2013)
|
|
|
|
*
|
2020-05-27 15:46:55 -07:00
|
|
|
* AMD RAPL interface for Fam17h is described in the public PPR:
|
|
|
|
* https://bugzilla.kernel.org/show_bug.cgi?id=206537
|
|
|
|
*
|
2013-11-12 09:58:50 -07:00
|
|
|
* RAPL provides more controls than just reporting energy consumption
|
|
|
|
* however here we only expose the 3 energy consumption free running
|
|
|
|
* counters (pp0, pkg, dram).
|
|
|
|
*
|
|
|
|
* Each of those counters increments in a power unit defined by the
|
|
|
|
* RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
|
|
|
|
* but it can vary.
|
|
|
|
*
|
|
|
|
* Counter to rapl events mappings:
|
|
|
|
*
|
|
|
|
* pp0 counter: consumption of all physical cores (power plane 0)
|
|
|
|
* event: rapl_energy_cores
|
|
|
|
* perf code: 0x1
|
|
|
|
*
|
|
|
|
* pkg counter: consumption of the whole processor package
|
|
|
|
* event: rapl_energy_pkg
|
|
|
|
* perf code: 0x2
|
|
|
|
*
|
|
|
|
* dram counter: consumption of the dram domain (servers only)
|
|
|
|
* event: rapl_energy_dram
|
|
|
|
* perf code: 0x3
|
|
|
|
*
|
2016-04-17 15:03:00 -07:00
|
|
|
* gpu counter: consumption of the builtin-gpu domain (client only)
|
2014-01-08 03:15:53 -07:00
|
|
|
* event: rapl_energy_gpu
|
|
|
|
* perf code: 0x4
|
|
|
|
*
|
2016-04-17 15:03:00 -07:00
|
|
|
* psys counter: consumption of the builtin-psys domain (client only)
|
|
|
|
* event: rapl_energy_psys
|
|
|
|
* perf code: 0x5
|
|
|
|
*
|
2013-11-12 09:58:50 -07:00
|
|
|
* We manage those counters as free running (read-only). They may be
|
|
|
|
* use simultaneously by other tools, such as turbostat.
|
|
|
|
*
|
|
|
|
* The events only support system-wide mode counting. There is no
|
|
|
|
* sampling support because it does not make sense and is not
|
|
|
|
* supported by the RAPL hardware.
|
|
|
|
*
|
|
|
|
* Because we want to avoid floating-point operations in the kernel,
|
|
|
|
* the events are all reported in fixed point arithmetic (32.32).
|
|
|
|
* Tools must adjust the counts to convert them to Watts using
|
|
|
|
* the duration of the measurement. Tools may use a function such as
|
|
|
|
* ldexp(raw_count, -32);
|
|
|
|
*/
|
2016-02-22 15:19:23 -07:00
|
|
|
|
|
|
|
#define pr_fmt(fmt) "RAPL PMU: " fmt
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/perf_event.h>
|
2019-06-16 07:03:56 -07:00
|
|
|
#include <linux/nospec.h>
|
2013-11-12 09:58:50 -07:00
|
|
|
#include <asm/cpu_device_id.h>
|
2016-06-02 17:19:30 -07:00
|
|
|
#include <asm/intel-family.h>
|
2020-05-27 15:46:55 -07:00
|
|
|
#include "perf_event.h"
|
|
|
|
#include "probe.h"
|
2013-11-12 09:58:50 -07:00
|
|
|
|
2024-05-30 13:12:03 -07:00
|
|
|
MODULE_DESCRIPTION("Support Intel/AMD RAPL energy consumption counters");
|
2016-03-19 00:20:50 -07:00
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
/*
 * RAPL energy status counters
 */
enum perf_rapl_events {
	PERF_RAPL_PP0 = 0,		/* all cores */
	PERF_RAPL_PKG,			/* entire package */
	PERF_RAPL_RAM,			/* DRAM */
	PERF_RAPL_PP1,			/* gpu */
	PERF_RAPL_PSYS,			/* psys */

	PERF_RAPL_MAX,
	/* Alias: number of RAPL domains this driver knows about */
	NR_RAPL_DOMAINS = PERF_RAPL_MAX,
};
|
|
|
|
|
2015-11-30 10:48:42 -07:00
|
|
|
/* Human-readable domain names, indexed by enum perf_rapl_events (init-time only) */
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
	"pp0-core",
	"package",
	"dram",
	"pp1-gpu",
	"psys",
};
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL
/* RAPL energy counters are 32 bits wide in hardware */
#define RAPL_CNTR_WIDTH 32
|
2013-11-12 09:58:50 -07:00
|
|
|
|
2015-12-04 03:07:41 -07:00
|
|
|
/*
 * Declare a sysfs event attribute named event_attr_<v> whose show method
 * (perf_event_sysfs_show) emits the fixed string @str, e.g. "event=0x02".
 */
#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
};
|
|
|
|
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
/*
 * RAPL Package energy counter scope:
 * 1. AMD/HYGON platforms have a per-PKG package energy counter
 * 2. For Intel platforms
 *	2.1. CLX-AP is multi-die and its RAPL MSRs are die-scope
 *	2.2. Other Intel platforms are single die systems so the scope can be
 *	     considered as either pkg-scope or die-scope, and we are considering
 *	     them as die-scope.
 */
#define rapl_pmu_is_pkg_scope()				\
	(boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||	\
	 boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
/* Per-package/die PMU instance */
struct rapl_pmu {
	raw_spinlock_t		lock;		/* protects n_active and active_list */
	int			n_active;	/* number of currently started events */
	int			cpu;		/* CPU designated to collect events, -1 if none */
	struct list_head	active_list;	/* started events, polled by the hrtimer */
	struct pmu		*pmu;		/* back-pointer to the shared struct pmu */
	ktime_t			timer_interval;	/* polling period (from rapl_timer_ms) */
	struct hrtimer		hrtimer;	/* periodic counter-update timer */
};
|
|
|
|
|
2016-02-22 15:19:26 -07:00
|
|
|
/* Top-level container: the registered PMU plus one rapl_pmu per package/die */
struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		nr_rapl_pmu;	/* number of entries in pmus[] */
	struct rapl_pmu		*pmus[] __counted_by(nr_rapl_pmu);
};
|
|
|
|
|
2020-08-11 08:31:48 -07:00
|
|
|
/* Per-model deviations from the energy unit reported by the power-unit MSR */
enum rapl_unit_quirk {
	RAPL_UNIT_QUIRK_NONE,
	RAPL_UNIT_QUIRK_INTEL_HSW,	/* Haswell-specific unit adjustment */
	RAPL_UNIT_QUIRK_INTEL_SPR,	/* Sapphire Rapids-specific unit adjustment */
};
|
|
|
|
|
2019-06-16 07:03:54 -07:00
|
|
|
/* Per-CPU-model description of the available RAPL events and their MSRs */
struct rapl_model {
	struct perf_msr	*rapl_msrs;		/* MSR table (Intel/SPR/AMD variants) */
	unsigned long	events;			/* bitmask of supported PERF_RAPL_* events */
	unsigned int	msr_power_unit;		/* MSR that reports the energy unit */
	enum rapl_unit_quirk	unit_quirk;	/* model-specific unit fixup, if any */
};
|
|
|
|
|
2016-02-22 15:19:24 -07:00
|
|
|
/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;	/* all per-package/die PMU instances */
static cpumask_t rapl_cpu_mask;		/* one collecting CPU per package/die */
static unsigned int rapl_cntr_mask;	/* bitmask of counters present on this system */
static u64 rapl_timer_ms;		/* hrtimer polling period */
static struct perf_msr *rapl_msrs;	/* MSR table selected for this system */
|
2013-11-12 09:58:50 -07:00
|
|
|
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
/*
 * Helper functions to get the correct topology macros according to the
 * RAPL PMU scope.
 */
static inline unsigned int get_rapl_pmu_idx(int cpu)
{
	if (rapl_pmu_is_pkg_scope())
		return topology_logical_package_id(cpu);

	return topology_logical_die_id(cpu);
}
|
|
|
|
|
|
|
|
/* Return the CPUs sharing this CPU's RAPL domain (whole package or die). */
static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
{
	if (rapl_pmu_is_pkg_scope())
		return topology_core_cpumask(cpu);

	return topology_die_cpumask(cpu);
}
|
|
|
|
|
2016-02-22 15:19:26 -07:00
|
|
|
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
|
|
|
|
{
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
|
2017-01-31 15:58:38 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The unsigned check also catches the '-1' return value for non
|
|
|
|
* existent mappings in the topology map.
|
|
|
|
*/
|
2024-05-02 02:51:14 -07:00
|
|
|
return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->pmus[rapl_pmu_idx] : NULL;
|
2016-02-22 15:19:26 -07:00
|
|
|
}
|
2013-11-12 09:58:50 -07:00
|
|
|
|
|
|
|
static inline u64 rapl_read_counter(struct perf_event *event)
|
|
|
|
{
|
|
|
|
u64 raw;
|
|
|
|
rdmsrl(event->hw.event_base, raw);
|
|
|
|
return raw;
|
|
|
|
}
|
|
|
|
|
2015-03-26 14:28:45 -07:00
|
|
|
/*
 * Scale a raw counter delta @v (in 1/2^rapl_hw_unit Joule units) for the
 * 1-based domain code @cfg up to the common 1/2^32 Joule fixed-point unit.
 * Returns @v unscaled if @cfg is out of range.
 */
static inline u64 rapl_scale(u64 v, int cfg)
{
	/*
	 * cfg is a 1-based event code. The original check only rejected
	 * cfg > NR_RAPL_DOMAINS, so cfg == 0 (or negative) slipped through
	 * and indexed rapl_hw_unit[-1] below — an out-of-bounds read.
	 * Reject the low end as well.
	 */
	if (cfg < 1 || cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * scale delta to smallest unit (1/2^32)
	 * users must then scale back: count * 1/(1e9*2^32) to get Joules
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
|
|
|
|
|
|
|
|
/*
 * Read the counter MSR, publish the new value as prev_count, and add the
 * scaled delta since the previous read to event->count.
 * Returns the new raw counter value.
 */
static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

	prev_raw_count = local64_read(&hwc->prev_count);
	/* Retry if another updater published a value between read and cmpxchg */
	do {
		rdmsrl(event->hw.event_base, new_raw_count);
	} while (!local64_try_cmpxchg(&hwc->prev_count,
				      &prev_raw_count, new_raw_count));

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;	/* sign-extend the 32-bit counter difference */

	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
|
|
|
|
|
2013-11-12 09:58:51 -07:00
|
|
|
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
|
|
|
|
{
|
2015-04-14 14:09:00 -07:00
|
|
|
hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
|
|
|
|
HRTIMER_MODE_REL_PINNED);
|
2013-11-12 09:58:51 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Periodic timer callback: fold the current counter values into every
 * active event so the 32-bit hardware counters cannot wrap unnoticed.
 */
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	/* No active events left: let the timer stop until the next start */
	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}
|
|
|
|
|
|
|
|
static void rapl_hrtimer_init(struct rapl_pmu *pmu)
|
|
|
|
{
|
|
|
|
struct hrtimer *hr = &pmu->hrtimer;
|
|
|
|
|
|
|
|
hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
|
|
|
hr->function = rapl_hrtimer_handle;
|
|
|
|
}
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
/*
 * Start @event on @pmu: snapshot the current MSR value as the delta
 * baseline, link the event on the active list, and arm the polling timer
 * when this is the first active event.  Caller must hold pmu->lock.
 */
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	/* Baseline for subsequent delta computations in rapl_event_update() */
	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}
|
|
|
|
|
|
|
|
/* pmu::start callback — start the event under the PMU lock. */
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * pmu::stop callback — deactivate @event; with PERF_EF_UPDATE also fold
 * the remaining counter delta into event->count.
 */
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		/* Last active event: the polling timer is no longer needed */
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of a event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * pmu::add callback — register @event as stopped/up-to-date and start it
 * immediately when PERF_EF_START is requested.  Always returns 0.
 */
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
|
|
|
|
|
|
|
|
/* pmu::del callback — stop the event and drain its remaining delta. */
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
|
|
|
|
|
|
|
|
/*
 * pmu::event_init callback — validate attr.config, map the 1-based event
 * code to a RAPL domain bit, and bind the event to the package/die PMU of
 * event->cpu.  Returns 0 on success or a negative errno.
 */
static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	/* counting is system-wide only: a concrete CPU is required */
	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/* event codes are 1-based: 0x1 .. NR_RAPL_DOMAINS */
	if (!cfg || cfg >= NR_RAPL_DOMAINS + 1)
		return -EINVAL;

	/* clamp under speculation before cfg is used as an array index */
	cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1);
	bit = cfg - 1;

	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	/* redirect to the designated collecting CPU of this package/die */
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = rapl_msrs[bit].msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
|
|
|
|
|
|
|
|
/* pmu::read callback — fold the current MSR value into event->count. */
static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}
|
|
|
|
|
|
|
|
/* sysfs show method for the "cpumask" attribute (collecting CPUs). */
static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

/* Exposes /sys/devices/<pmu>/cpumask */
static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};
|
|
|
|
|
2015-01-13 15:59:53 -07:00
|
|
|
/* Event encodings exposed through sysfs (attr->config values 0x1..0x5) */
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");

/* All domains report Joules once scaled */
RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 * (counts are normalized to 1/2^32 J by rapl_scale(), so the sysfs
 * scale is always 2^-32 J)
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10");
|
2013-11-12 09:58:50 -07:00
|
|
|
|
2019-06-16 07:03:54 -07:00
|
|
|
/*
 * There are no default events, but we need to create
 * "events" group (with empty attrs) before updating
 * it with detected events.
 */
static struct attribute *attrs_empty[] = {
	NULL,
};

static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = attrs_empty,
};
|
|
|
|
|
2020-11-13 11:31:26 -07:00
|
|
|
/* "event" format field: bits 0-7 of attr->config */
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};

/* All attribute groups registered with the RAPL PMU */
static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};
|
|
|
|
|
2019-06-16 07:03:54 -07:00
|
|
|
/*
 * Per-domain "events" groups (event code, unit, scale).  perf_msr_probe()
 * merges the groups of the domains actually present into the PMU's
 * "events" sysfs directory.
 */
static struct attribute *rapl_events_cores[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_cores_scale),
	NULL,
};

static struct attribute_group rapl_events_cores_group = {
	.name  = "events",
	.attrs = rapl_events_cores,
};

static struct attribute *rapl_events_pkg[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_pkg_scale),
	NULL,
};

static struct attribute_group rapl_events_pkg_group = {
	.name  = "events",
	.attrs = rapl_events_pkg,
};

static struct attribute *rapl_events_ram[] = {
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};

static struct attribute_group rapl_events_ram_group = {
	.name  = "events",
	.attrs = rapl_events_ram,
};

static struct attribute *rapl_events_gpu[] = {
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};

static struct attribute_group rapl_events_gpu_group = {
	.name  = "events",
	.attrs = rapl_events_gpu,
};

static struct attribute *rapl_events_psys[] = {
	EVENT_PTR(rapl_psys),
	EVENT_PTR(rapl_psys_unit),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};

static struct attribute_group rapl_events_psys_group = {
	.name  = "events",
	.attrs = rapl_events_psys,
};
|
|
|
|
|
|
|
|
/*
 * perf_msr_probe() callback: an MSR table slot @idx is usable iff its bit
 * is set in the bitmask passed via @data.
 */
static bool test_msr(int idx, void *data)
{
	unsigned long *avail = data;

	return test_bit(idx, avail);
}
|
|
|
|
|
2021-02-04 09:18:15 -07:00
|
|
|
/* Only lower 32bits of the MSR represents the energy counter */
#define RAPL_MSR_MASK 0xFFFFFFFF

/* Intel MSR table, indexed by enum perf_rapl_events */
static struct perf_msr intel_rapl_msrs[] = {
	[PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr, false, RAPL_MSR_MASK },
};
|
|
|
|
|
perf/x86/rapl: Fix psys-energy event on Intel SPR platform
There are several things special for the RAPL Psys energy counter, on
Intel Sapphire Rapids platform.
1. it contains one Psys master package, and only CPUs on the master
package can read valid value of the Psys energy counter, reading the
MSR on CPUs in the slave package returns 0.
2. The master package does not have to be Physical package 0. And when
all the CPUs on the Psys master package are offlined, we lose the Psys
energy counter, at runtime.
3. The Psys energy counter can be disabled by BIOS, while all the other
energy counters are not affected.
It is not easy to handle all of these in the current RAPL PMU design
because
a) perf_msr_probe() validates the MSR on some random CPU, which may either
be in the Psys master package or in the Psys slave package.
b) all the RAPL events share the same PMU, and there is not API to remove
the psys-energy event cleanly, without affecting the other events in
the same PMU.
This patch addresses the problems in a simple way.
First, by setting .no_check bit for RAPL Psys MSR, the psys-energy event
is always added, so we don't have to check the Psys ENERGY_STATUS MSR on
master package.
Then, by removing rapl_not_visible(), the psys-energy event is always
available in sysfs. This does not affect the previous code because, for
the RAPL MSRs with .no_check cleared, the .is_visible() callback is always
overriden in the perf_msr_probe() function.
Note, although RAPL PMU is die-based, and the Psys energy counter MSR on
Intel SPR is package scope, this is not a problem because there is only
one die in each package on SPR.
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/20210204161816.12649-3-rui.zhang@intel.com
2021-02-04 09:18:16 -07:00
|
|
|
/*
 * Sapphire Rapids MSR table: same as intel_rapl_msrs except the PSYS entry
 * sets .no_check, so the psys event is always registered — probing the MSR
 * would read 0 on CPUs outside the Psys master package.
 */
static struct perf_msr intel_rapl_spr_msrs[] = {
	[PERF_RAPL_PP0]  = { MSR_PP0_ENERGY_STATUS,      &rapl_events_cores_group, test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PKG]  = { MSR_PKG_ENERGY_STATUS,      &rapl_events_pkg_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_RAM]  = { MSR_DRAM_ENERGY_STATUS,     &rapl_events_ram_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PP1]  = { MSR_PP1_ENERGY_STATUS,      &rapl_events_gpu_group,   test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_PSYS] = { MSR_PLATFORM_ENERGY_STATUS, &rapl_events_psys_group,  test_msr, true,  RAPL_MSR_MASK },
};
|
|
|
|
|
2020-05-27 15:46:59 -07:00
|
|
|
/*
 * Force to PERF_RAPL_MAX size due to:
 * - perf_msr_probe(PERF_RAPL_MAX)
 * - want to use same event codes across both architectures
 */
static struct perf_msr amd_rapl_msrs[] = {
	/* Only the package counter exists on AMD; other slots are placeholders */
	[PERF_RAPL_PP0]  = { 0, &rapl_events_cores_group, NULL, false, 0 },
	[PERF_RAPL_PKG]  = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
	[PERF_RAPL_RAM]  = { 0, &rapl_events_ram_group,  NULL, false, 0 },
	[PERF_RAPL_PP1]  = { 0, &rapl_events_gpu_group,  NULL, false, 0 },
	[PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 },
};
|
|
|
|
|
2016-07-13 10:16:15 -07:00
|
|
|
/*
 * CPU hotplug teardown: if @cpu was the designated collector for its
 * package/die, hand the role — and the active events — to another online
 * CPU in the same domain, or leave the PMU collector-less (cpu = -1).
 */
static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}
|
|
|
|
|
2016-07-13 10:16:15 -07:00
|
|
|
static int rapl_cpu_online(unsigned int cpu)
|
2013-11-12 09:58:50 -07:00
|
|
|
{
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu);
|
|
|
|
if (rapl_pmu_idx < 0) {
|
|
|
|
pr_err("topology_logical_(package/die)_id() returned a negative value");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2016-02-22 15:19:26 -07:00
|
|
|
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
|
|
|
|
int target;
|
|
|
|
|
2017-01-31 15:58:38 -07:00
|
|
|
if (!pmu) {
|
|
|
|
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
|
|
|
|
if (!pmu)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
raw_spin_lock_init(&pmu->lock);
|
|
|
|
INIT_LIST_HEAD(&pmu->active_list);
|
|
|
|
pmu->pmu = &rapl_pmus->pmu;
|
|
|
|
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
|
|
|
|
rapl_hrtimer_init(pmu);
|
|
|
|
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
rapl_pmus->pmus[rapl_pmu_idx] = pmu;
|
2017-01-31 15:58:38 -07:00
|
|
|
}
|
|
|
|
|
2016-02-22 15:19:26 -07:00
|
|
|
/*
|
|
|
|
* Check if there is an online cpu in the package which collects rapl
|
|
|
|
* events already.
|
|
|
|
*/
|
perf/x86/rapl: Fix the energy-pkg event for AMD CPUs
After commit:
63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
... on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros no longer
return the package cpumask and package ID, instead they return the CCD
(Core Complex Die) mask and ID respectively.
This leads to the energy-pkg event scope to be modified to CCD instead of package.
So, change the PMU scope for AMD and Hygon back to package.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88.
The expected cpumask here is supposed to be just "0", as it is a package
scope event, only one CPU will be collecting the event for all the CPUs in
the package.
After:
$ cat /sys/devices/power/cpumask
0
[ mingo: Cleaned up the changelog ]
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Link: https://lore.kernel.org/r/20240904100934.3260-1-Dhananjay.Ugwekar@amd.com
2024-07-29 21:49:18 -07:00
|
|
|
target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu));
|
2016-02-22 15:19:26 -07:00
|
|
|
if (target < nr_cpu_ids)
|
2016-07-13 10:16:15 -07:00
|
|
|
return 0;
|
2013-11-12 09:58:50 -07:00
|
|
|
|
|
|
|
cpumask_set_cpu(cpu, &rapl_cpu_mask);
|
2016-02-22 15:19:26 -07:00
|
|
|
pmu->cpu = cpu;
|
2016-07-13 10:16:15 -07:00
|
|
|
return 0;
|
2013-11-12 09:58:50 -07:00
|
|
|
}
|
|
|
|
|
2020-05-27 15:46:56 -07:00
|
|
|
static int rapl_check_hw_unit(struct rapl_model *rm)
|
2015-03-26 14:28:45 -07:00
|
|
|
{
|
|
|
|
u64 msr_rapl_power_unit_bits;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* protect rdmsrl() to handle virtualization */
|
2020-05-27 15:46:56 -07:00
|
|
|
if (rdmsrl_safe(rm->msr_power_unit, &msr_rapl_power_unit_bits))
|
2015-03-26 14:28:45 -07:00
|
|
|
return -1;
|
|
|
|
for (i = 0; i < NR_RAPL_DOMAINS; i++)
|
|
|
|
rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
|
|
|
|
|
2020-08-11 08:31:48 -07:00
|
|
|
switch (rm->unit_quirk) {
|
2016-03-08 09:40:41 -07:00
|
|
|
/*
|
|
|
|
* DRAM domain on HSW server and KNL has fixed energy unit which can be
|
|
|
|
* different than the unit from power unit MSR. See
|
|
|
|
* "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
|
|
|
|
* of 2. Datasheet, September 2014, Reference Number: 330784-001 "
|
|
|
|
*/
|
2020-08-11 08:31:48 -07:00
|
|
|
case RAPL_UNIT_QUIRK_INTEL_HSW:
|
2019-06-16 07:03:56 -07:00
|
|
|
rapl_hw_unit[PERF_RAPL_RAM] = 16;
|
2020-08-11 08:31:48 -07:00
|
|
|
break;
|
2022-09-23 22:47:37 -07:00
|
|
|
/* SPR uses a fixed energy unit for Psys domain. */
|
2020-08-11 08:31:49 -07:00
|
|
|
case RAPL_UNIT_QUIRK_INTEL_SPR:
|
|
|
|
rapl_hw_unit[PERF_RAPL_PSYS] = 0;
|
|
|
|
break;
|
2020-08-11 08:31:48 -07:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-02-22 15:19:22 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Calculate the timer rate:
|
|
|
|
* Use reference of 200W for scaling the timeout to avoid counter
|
|
|
|
* overflows. 200W = 200 Joules/sec
|
|
|
|
* Divide interval by 2 to avoid lockstep (2 * 100)
|
|
|
|
* if hw unit is 32, then we use 2 ms 1/200/2
|
|
|
|
*/
|
|
|
|
rapl_timer_ms = 2;
|
|
|
|
if (rapl_hw_unit[0] < 32) {
|
|
|
|
rapl_timer_ms = (1000 / (2 * 100));
|
|
|
|
rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
|
|
|
|
}
|
2015-03-26 14:28:45 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-02-22 15:19:23 -07:00
|
|
|
/*
 * Log the probed RAPL configuration at init time: counter count, timer
 * interval, and the energy unit of each available domain.
 */
static void __init rapl_advertise(void)
{
	int dom;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (dom = 0; dom < NR_RAPL_DOMAINS; dom++) {
		/* Skip domains that perf_msr_probe() did not enable */
		if (!(rapl_cntr_mask & (1 << dom)))
			continue;

		pr_info("hw unit of domain %s 2^-%d Joules\n",
			rapl_domain_names[dom], rapl_hw_unit[dom]);
	}
}
|
|
|
|
|
2016-03-19 00:20:50 -07:00
|
|
|
static void cleanup_rapl_pmus(void)
|
2016-02-22 15:19:21 -07:00
|
|
|
{
|
2016-02-22 15:19:26 -07:00
|
|
|
int i;
|
|
|
|
|
2024-05-02 02:51:14 -07:00
|
|
|
for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
|
2016-05-24 07:53:49 -07:00
|
|
|
kfree(rapl_pmus->pmus[i]);
|
2016-02-22 15:19:26 -07:00
|
|
|
kfree(rapl_pmus);
|
|
|
|
}
|
2016-02-22 15:19:21 -07:00
|
|
|
|
2019-08-08 10:44:02 -07:00
|
|
|
/*
 * Per-domain event description attribute groups, hooked up via
 * rapl_pmus->pmu.attr_update in init_rapl_pmus(). NULL-terminated.
 */
static const struct attribute_group *rapl_attr_update[] = {
	&rapl_events_cores_group,
	&rapl_events_pkg_group,
	&rapl_events_ram_group,
	&rapl_events_gpu_group,
	&rapl_events_psys_group,
	NULL,
};
|
|
|
|
|
2016-02-22 15:19:26 -07:00
|
|
|
/*
 * Allocate the rapl_pmus container (one rapl_pmu slot per package, or
 * per die when the PMU is die-scoped) and fill in the perf_pmu callbacks.
 * The per-slot rapl_pmu structs themselves are allocated lazily in the
 * CPU online callback. Returns 0 on success, -ENOMEM on failure.
 */
static int __init init_rapl_pmus(void)
{
	int nr_rapl_pmu = topology_max_packages();

	/* Die-scoped PMU (e.g. multi-die Intel parts): one PMU per die */
	if (!rapl_pmu_is_pkg_scope())
		nr_rapl_pmu *= topology_max_dies_per_package();

	/* struct_size() accounts for the flexible pmus[] array safely */
	rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->nr_rapl_pmu = nr_rapl_pmu;
	rapl_pmus->pmu.attr_groups = rapl_attr_groups;
	rapl_pmus->pmu.attr_update = rapl_attr_update;
	/* Counting-only PMU: events have no task context */
	rapl_pmus->pmu.task_ctx_nr = perf_invalid_context;
	rapl_pmus->pmu.event_init = rapl_pmu_event_init;
	rapl_pmus->pmu.add = rapl_pmu_event_add;
	rapl_pmus->pmu.del = rapl_pmu_event_del;
	rapl_pmus->pmu.start = rapl_pmu_event_start;
	rapl_pmus->pmu.stop = rapl_pmu_event_stop;
	rapl_pmus->pmu.read = rapl_pmu_event_read;
	rapl_pmus->pmu.module = THIS_MODULE;
	/* RAPL counters are free-running; exclude_* modifiers don't apply */
	rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	return 0;
}
|
|
|
|
|
2019-06-16 07:03:54 -07:00
|
|
|
/* SandyBridge/IvyBridge client: cores (pp0), package and gfx (pp1) domains */
static struct rapl_model model_snb = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_PP1),
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
|
|
|
/* SandyBridge/IvyBridge server: cores, package and DRAM domains */
static struct rapl_model model_snbep = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
|
|
|
/* Haswell/Broadwell client (and Goldmont Atom): cores, package, DRAM, gfx */
static struct rapl_model model_hsw = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM) |
			  BIT(PERF_RAPL_PP1),
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
|
|
|
/* Haswell-and-later servers: HSW quirk gives DRAM a fixed 2^-16 J unit */
static struct rapl_model model_hsx = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_HSW,
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
|
|
|
/* Knights Landing/Mill: package and DRAM only, same DRAM unit quirk as HSX */
static struct rapl_model model_knl = {
	.events		= BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM),
	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_HSW,
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
|
|
|
/* Skylake and most later clients: all five domains including psys */
static struct rapl_model model_skl = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM) |
			  BIT(PERF_RAPL_PP1) |
			  BIT(PERF_RAPL_PSYS),
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs      = intel_rapl_msrs,
};
|
|
|
|
|
2020-08-11 08:31:49 -07:00
|
|
|
/*
 * Sapphire/Emerald Rapids: psys has a fixed energy unit (quirk) and uses
 * the SPR-specific MSR table so the psys event stays available even though
 * only the master package's CPUs read valid psys values.
 */
static struct rapl_model model_spr = {
	.events		= BIT(PERF_RAPL_PP0) |
			  BIT(PERF_RAPL_PKG) |
			  BIT(PERF_RAPL_RAM) |
			  BIT(PERF_RAPL_PSYS),
	.unit_quirk	= RAPL_UNIT_QUIRK_INTEL_SPR,
	.msr_power_unit = MSR_RAPL_POWER_UNIT,
	.rapl_msrs	= intel_rapl_spr_msrs,
};
|
|
|
|
|
2021-05-14 06:59:20 -07:00
|
|
|
/* AMD Fam17h+ and Hygon: package energy only, via AMD's RAPL MSRs */
static struct rapl_model model_amd_hygon = {
	.events		= BIT(PERF_RAPL_PKG),
	.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
	.rapl_msrs	= amd_rapl_msrs,
};
|
|
|
|
|
2019-06-16 07:03:54 -07:00
|
|
|
/*
 * CPU match table: AMD/Hygon parts are matched by the X86_FEATURE_RAPL
 * flag, Intel parts by vendor-family-model. The matched entry's
 * driver_data points at the rapl_model used for probing.
 */
static const struct x86_cpu_id rapl_model_match[] __initconst = {
	X86_MATCH_FEATURE(X86_FEATURE_RAPL,	&model_amd_hygon),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE,	&model_snb),
	X86_MATCH_VFM(INTEL_SANDYBRIDGE_X,	&model_snbep),
	X86_MATCH_VFM(INTEL_IVYBRIDGE,		&model_snb),
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X,	&model_snbep),
	X86_MATCH_VFM(INTEL_HASWELL,		&model_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_X,		&model_hsx),
	X86_MATCH_VFM(INTEL_HASWELL_L,		&model_hsw),
	X86_MATCH_VFM(INTEL_HASWELL_G,		&model_hsw),
	X86_MATCH_VFM(INTEL_BROADWELL,		&model_hsw),
	X86_MATCH_VFM(INTEL_BROADWELL_G,	&model_hsw),
	X86_MATCH_VFM(INTEL_BROADWELL_X,	&model_hsx),
	X86_MATCH_VFM(INTEL_BROADWELL_D,	&model_hsx),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL,	&model_knl),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM,	&model_knl),
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		&model_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		&model_hsx),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		&model_skl),
	X86_MATCH_VFM(INTEL_KABYLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	&model_skl),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT,	&model_hsw),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D,	&model_hsw),
	X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &model_hsw),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		&model_skl),
	X86_MATCH_VFM(INTEL_ICELAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		&model_hsx),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		&model_hsx),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	&model_skl),
	X86_MATCH_VFM(INTEL_COMETLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	&model_skl),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	&model_skl),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	&model_skl),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	&model_spr),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	&model_spr),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	&model_skl),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	&model_skl),
	X86_MATCH_VFM(INTEL_METEORLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	&model_skl),
	X86_MATCH_VFM(INTEL_ARROWLAKE_H,	&model_skl),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		&model_skl),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	&model_skl),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
|
|
|
|
|
2013-11-12 09:58:50 -07:00
|
|
|
/*
 * Module init: match the CPU model, probe the available RAPL counters,
 * read the hardware energy units, allocate the PMU container, register
 * the hotplug callbacks and finally the "power" PMU with perf.
 *
 * Teardown on failure is ordered via gotos: a failed PMU registration
 * must remove the hotplug state installed just before it.
 */
static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct rapl_model *rm;
	int ret;

	id = x86_match_cpu(rapl_model_match);
	if (!id)
		return -ENODEV;

	rm = (struct rapl_model *) id->driver_data;

	rapl_msrs = rm->rapl_msrs;

	/* Probe which RAPL domains actually exist on this part */
	rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX,
					false, (void *) &rm->events);

	ret = rapl_check_hw_unit(rm);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);
|
|
|
|
|
|
|
|
/*
 * Module exit: tear down in reverse order of init — remove the hotplug
 * state (without invoking the offline callbacks), unregister the PMU,
 * then free all per-domain structures.
 */
static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);
|