// SPDX-License-Identifier: GPL-2.0-only
/*
 * cppc.c: CPPC Interface for x86
 * Copyright (c) 2016, Intel Corporation.
 */

#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/topology.h>

#define CPPC_HIGHEST_PERF_PREFCORE	166

enum amd_pref_core {
	AMD_PREF_CORE_UNKNOWN = 0,
	AMD_PREF_CORE_SUPPORTED,
	AMD_PREF_CORE_UNSUPPORTED,
};
static enum amd_pref_core amd_pref_core_detected;
static u64 boost_numerator;

/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */

bool cpc_supported_by_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
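		/*
		 * Some family 0x17 (Zen2) and family 0x19 (Zen3) parts
		 * implement CPPC via ACPI without advertising it in CPUID,
		 * so whitelist those family/model ranges before falling
		 * back to the X86_FEATURE_CPPC feature bit.
		 */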
		if (boot_cpu_data.x86 == 0x19 && ((boot_cpu_data.x86_model <= 0x0f) ||
		    (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
			return true;
		else if (boot_cpu_data.x86 == 0x17 &&
			 boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
			return true;
		return boot_cpu_has(X86_FEATURE_CPPC);
	}
	return false;
}

bool cpc_ffh_supported(void)
{
	return true;
}

int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		*val &= mask;
		*val >>= reg->bit_offset;
	}
	return err;
}

int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	u64 rd_val;
	int err;

	err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val);
	if (!err) {
		u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1,
				       reg->bit_offset);

		val <<= reg->bit_offset;
		val &= mask;
		rd_val &= ~mask;
		rd_val |= val;
		err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val);
	}
	return err;
}
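
/*
 * Worked example of the FFH mask handling above, with hypothetical
 * register fields bit_offset = 8 and bit_width = 8:
 *
 *	mask = GENMASK_ULL(15, 8) = 0xff00
 *	read:  *val = (msr & 0xff00) >> 8
 *	write: msr  = (msr & ~0xff00) | ((val << 8) & 0xff00)
 *
 * i.e. reads extract only the addressed bitfield, and writes perform a
 * read-modify-write that leaves the other bits of the MSR untouched.
 */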

static void amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 numerator, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_warn("Could not retrieve perf counters (%d)\n", rc);
		return;
	}

	rc = amd_get_boost_ratio_numerator(0, &numerator);
	if (rc) {
		pr_warn("Could not retrieve highest performance (%d)\n", rc);
		return;
	}
	nominal_perf = perf_caps.nominal_perf;

	if (!nominal_perf) {
		pr_warn("Could not retrieve nominal performance\n");
		return;
	}

	/* midpoint between max_boost and max_P */
	perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;

	freq_invariance_set_perf_ratio(perf_ratio, false);
}
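
/*
 * Worked example for the ratio above, with hypothetical values
 * numerator == 166 and nominal_perf == 120 (SCHED_CAPACITY_SCALE is 1024):
 *
 *	div_u64(166 * 1024, 120) == 1416	(full boost ratio, scaled)
 *	(1416 + 1024) >> 1       == 1220	(midpoint with unity scale)
 *
 * i.e. the scheduler is handed a ~1.19x frequency ratio, halfway between
 * the nominal (1.0x) and maximum boost ratios.
 */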

static DEFINE_MUTEX(freq_invariance_lock);

void init_freq_invariance_cppc(void)
{
	static bool init_done;

	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
		return;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	mutex_lock(&freq_invariance_lock);
	if (!init_done)
		amd_set_max_freq_ratio();
	init_done = true;
	mutex_unlock(&freq_invariance_lock);
}
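
/*
 * The mutex plus the static init_done flag make init_freq_invariance_cppc()
 * idempotent: amd_set_max_freq_ratio() runs at most once no matter how many
 * times this hook is invoked from the per-CPU CPPC probe path.
 */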

/**
 * amd_get_highest_perf: Get the highest performance register value
 * @cpu: CPU from which to get highest performance.
 * @highest_perf: Return address for highest performance value.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
{
	u64 val;
	int ret;

	if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
		ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &val);
		if (ret)
			goto out;

		val = AMD_CPPC_HIGHEST_PERF(val);
	} else {
		ret = cppc_get_highest_perf(cpu, &val);
		if (ret)
			goto out;
	}

	WRITE_ONCE(*highest_perf, (u32)val);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
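
/*
 * Minimal usage sketch (hypothetical caller), e.g. ranking cores by their
 * advertised highest performance:
 *
 *	u32 perf;
 *
 *	if (!amd_get_highest_perf(cpu, &perf))
 *		sched_set_itmt_core_prio((int)perf, cpu);
 *
 * On preferred-core systems the value differs per CPU; otherwise every CPU
 * reports the same value (see amd_detect_prefcore() below).
 */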

/**
 * amd_detect_prefcore: Detect if CPUs in the system support preferred cores
 * @detected: Output variable for the result of the detection.
 *
 * Determine whether CPUs in the system support preferred cores. On systems
 * that support preferred cores, different highest perf values will be found
 * on different cores. On other systems, the highest perf value will be the
 * same on all cores.
 *
 * The result of the detection will be stored in the 'detected' parameter.
 *
 * Return: 0 for success, negative error code otherwise
 */
int amd_detect_prefcore(bool *detected)
{
	int cpu, count = 0;
	u64 highest_perf[2] = {0};

	if (WARN_ON(!detected))
		return -EINVAL;

	switch (amd_pref_core_detected) {
	case AMD_PREF_CORE_SUPPORTED:
		*detected = true;
		return 0;
	case AMD_PREF_CORE_UNSUPPORTED:
		*detected = false;
		return 0;
	default:
		break;
	}

	for_each_present_cpu(cpu) {
		u32 tmp;
		int ret;

		ret = amd_get_highest_perf(cpu, &tmp);
		if (ret)
			return ret;

		if (!count || (count == 1 && tmp != highest_perf[0]))
			highest_perf[count++] = tmp;

		if (count == 2)
			break;
	}

	*detected = (count == 2);
	boost_numerator = highest_perf[0];

	amd_pref_core_detected = *detected ? AMD_PREF_CORE_SUPPORTED :
					     AMD_PREF_CORE_UNSUPPORTED;

	pr_debug("AMD CPPC preferred core is %ssupported (highest perf: 0x%llx)\n",
		 *detected ? "" : "un", highest_perf[0]);

	return 0;
}
EXPORT_SYMBOL_GPL(amd_detect_prefcore);
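
/*
 * Note on the detection loop above: it records the first highest-perf value
 * seen, then the first value that differs from it. Two distinct values are
 * enough to prove preferred-core support, so the scan stops early rather
 * than walking every present CPU.
 */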

/**
 * amd_get_boost_ratio_numerator: Get the numerator to use for boost ratio calculation
 * @cpu: CPU to get numerator for.
 * @numerator: Output variable for numerator.
 *
 * Determine the numerator to use for calculating the boost ratio on
 * a CPU. On systems that support preferred cores, this will be a hardcoded
 * value. On other systems this will be the highest performance register value.
 *
 * If the system is booted with amd-pstate enabled but preferred cores disabled,
 * the correct boost numerator is still returned to match the hardware
 * capabilities, even though the preferred-core scheduling hints are not enabled.
 *
 * Return: 0 for success, negative error code otherwise.
 */
int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
{
	bool prefcore;
	int ret;

	ret = amd_detect_prefcore(&prefcore);
	if (ret)
		return ret;

	/* without preferred cores, return the highest perf register value */
	if (!prefcore) {
		*numerator = boost_numerator;
		return 0;
	}
	*numerator = CPPC_HIGHEST_PERF_PREFCORE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_get_boost_ratio_numerator);
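
/*
 * Example of how the numerator feeds the boost ratio (values hypothetical):
 * on a preferred-core system the numerator is pinned to
 * CPPC_HIGHEST_PERF_PREFCORE (166), so with nominal_perf == 120 the boost
 * ratio used by amd_set_max_freq_ratio() works out to roughly
 * 166 / 120 ~= 1.38x before the midpoint averaging is applied.
 */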