iommu/vt-d: Clean up cpumask and hotplug for perfmon
The iommu PMU is system-wide in scope, which the generic perf_event
subsystem now supports directly. Set the scope for the iommu PMU and
remove all of the driver's own cpumask and hotplug code.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20240802151643.1691631-5-kan.liang@linux.intel.com
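For context, a minimal sketch of the pattern this enables, assuming a
hypothetical "foo" uncore-style PMU (not the actual iommu code; only the
scope-relevant fields are shown):

	#include <linux/perf_event.h>

	/* Hypothetical counting PMU; no per-task context. */
	static struct pmu foo_pmu = {
		.task_ctx_nr	= perf_invalid_context,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		/*
		 * Declaring a scope hands the "one designated CPU" bookkeeping
		 * to the perf core: it keeps the events on an online CPU of the
		 * scope and migrates them on hotplug, so the driver needs no
		 * cpumask sysfs attribute and no cpuhp callbacks of its own.
		 */
		.scope		= PERF_PMU_SCOPE_SYS_WIDE,
		/* .event_init, .add, .del, .start, .stop, .read as usual */
	};

	static int __init foo_pmu_init(void)
	{
		return perf_pmu_register(&foo_pmu, "foo", -1);
	}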
commit a8c73b82f7
parent 08155c7f2a
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -687,8 +687,6 @@ struct iommu_pmu {
 	DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
 	struct perf_event	*event_list[IOMMU_PMU_IDX_MAX];
 	unsigned char		irq_name[16];
-	struct hlist_node	cpuhp_node;
-	int			cpu;
 };
 
 #define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
--- a/drivers/iommu/intel/perfmon.c
+++ b/drivers/iommu/intel/perfmon.c
@@ -34,28 +34,9 @@ static struct attribute_group iommu_pmu_events_attr_group = {
 	.attrs = attrs_empty,
 };
 
-static cpumask_t iommu_pmu_cpu_mask;
-
-static ssize_t
-cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask);
-}
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *iommu_pmu_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL
-};
-
-static struct attribute_group iommu_pmu_cpumask_attr_group = {
-	.attrs = iommu_pmu_cpumask_attrs,
-};
-
 static const struct attribute_group *iommu_pmu_attr_groups[] = {
 	&iommu_pmu_format_attr_group,
 	&iommu_pmu_events_attr_group,
-	&iommu_pmu_cpumask_attr_group,
 	NULL
 };
 
@@ -565,6 +546,7 @@ static int __iommu_pmu_register(struct intel_iommu *iommu)
 	iommu_pmu->pmu.attr_groups	= iommu_pmu_attr_groups;
 	iommu_pmu->pmu.attr_update	= iommu_pmu_attr_update;
 	iommu_pmu->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
+	iommu_pmu->pmu.scope		= PERF_PMU_SCOPE_SYS_WIDE;
 	iommu_pmu->pmu.module		= THIS_MODULE;
 
 	return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
@@ -773,89 +755,6 @@ static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
 	iommu->perf_irq = 0;
 }
 
-static int iommu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
-	struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
-
-	if (cpumask_empty(&iommu_pmu_cpu_mask))
-		cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
-
-	if (cpumask_test_cpu(cpu, &iommu_pmu_cpu_mask))
-		iommu_pmu->cpu = cpu;
-
-	return 0;
-}
-
-static int iommu_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
-	struct iommu_pmu *iommu_pmu = hlist_entry_safe(node, typeof(*iommu_pmu), cpuhp_node);
-	int target = cpumask_first(&iommu_pmu_cpu_mask);
-
-	/*
-	 * The iommu_pmu_cpu_mask has already been updated when offlining
-	 * the CPU for the first iommu_pmu. Migrate the other iommu_pmus
-	 * to the new target.
-	 */
-	if (target < nr_cpu_ids && target != iommu_pmu->cpu) {
-		perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
-		iommu_pmu->cpu = target;
-		return 0;
-	}
-
-	if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
-		return 0;
-
-	target = cpumask_any_but(cpu_online_mask, cpu);
-
-	if (target < nr_cpu_ids)
-		cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
-	else
-		return 0;
-
-	perf_pmu_migrate_context(&iommu_pmu->pmu, cpu, target);
-	iommu_pmu->cpu = target;
-
-	return 0;
-}
-
-static int nr_iommu_pmu;
-static enum cpuhp_state iommu_cpuhp_slot;
-
-static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
-{
-	int ret;
-
-	if (!nr_iommu_pmu) {
-		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
-					      "driver/iommu/intel/perfmon:online",
-					      iommu_pmu_cpu_online,
-					      iommu_pmu_cpu_offline);
-		if (ret < 0)
-			return ret;
-		iommu_cpuhp_slot = ret;
-	}
-
-	ret = cpuhp_state_add_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
-	if (ret) {
-		if (!nr_iommu_pmu)
-			cpuhp_remove_multi_state(iommu_cpuhp_slot);
-		return ret;
-	}
-	nr_iommu_pmu++;
-
-	return 0;
-}
-
-static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
-{
-	cpuhp_state_remove_instance(iommu_cpuhp_slot, &iommu_pmu->cpuhp_node);
-
-	if (--nr_iommu_pmu)
-		return;
-
-	cpuhp_remove_multi_state(iommu_cpuhp_slot);
-}
-
 void iommu_pmu_register(struct intel_iommu *iommu)
 {
 	struct iommu_pmu *iommu_pmu = iommu->pmu;
@@ -866,17 +765,12 @@ void iommu_pmu_register(struct intel_iommu *iommu)
 	if (__iommu_pmu_register(iommu))
 		goto err;
 
-	if (iommu_pmu_cpuhp_setup(iommu_pmu))
-		goto unregister;
-
 	/* Set interrupt for overflow */
 	if (iommu_pmu_set_interrupt(iommu))
-		goto cpuhp_free;
+		goto unregister;
 
 	return;
 
-cpuhp_free:
-	iommu_pmu_cpuhp_free(iommu_pmu);
 unregister:
 	perf_pmu_unregister(&iommu_pmu->pmu);
 err:
@@ -892,6 +786,5 @@ void iommu_pmu_unregister(struct intel_iommu *iommu)
 		return;
 
 	iommu_pmu_unset_interrupt(iommu);
-	iommu_pmu_cpuhp_free(iommu_pmu);
 	perf_pmu_unregister(&iommu_pmu->pmu);
 }
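Design note: the deleted iommu_pmu_cpu_online()/iommu_pmu_cpu_offline()
pair is exactly the boilerplate every uncore-style driver used to
duplicate; with pmu.scope set, the perf core performs the equivalent
event migration itself via perf_pmu_migrate_context(). For reference, a
sketch of the scope values available to drivers as of this series
(declared in include/linux/perf_event.h; the exact list here is from
memory, so check the current tree for the authoritative set):

	#define PERF_PMU_SCOPE_NONE		0	/* default: no core-managed scope */
	#define PERF_PMU_SCOPE_CORE		1
	#define PERF_PMU_SCOPE_DIE		2
	#define PERF_PMU_SCOPE_CLUSTER		3
	#define PERF_PMU_SCOPE_PKG		4
	#define PERF_PMU_SCOPE_SYS_WIDE		5	/* used by the iommu PMU */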