x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read()
Depending on the number of monitors available, Arm's MPAM may need to
allocate a monitor prior to reading the counter value. Allocating a
contended resource may involve sleeping.

__check_limbo() and mon_event_count() each make multiple calls to
resctrl_arch_rmid_read(). To avoid extra work on contended systems, the
allocation should remain valid across multiple invocations of
resctrl_arch_rmid_read(). The memory or hardware allocated is not
specific to a domain.

Add arch hooks for this allocation, which must be called before
resctrl_arch_rmid_read(). The allocated monitor is passed to
resctrl_arch_rmid_read(), then freed again afterwards. The helper can
be called on any CPU, and can sleep.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Babu Moger <babu.moger@amd.com>
Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Tested-by: Peter Newman <peternewman@google.com>
Tested-by: Babu Moger <babu.moger@amd.com>
Tested-by: Carl Worth <carl@os.amperecomputing.com> # arm64
Link: https://lore.kernel.org/r/20240213184438.16675-16-james.morse@arm.com
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
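To illustrate the calling convention this patch introduces, here is a
minimal, hypothetical caller sketch (not part of the patch; the function
name read_llc_occupancy() is invented). It mirrors what __check_limbo()
does after this change: one context allocation covers a whole batch of
reads.

/*
 * Hypothetical caller, for illustration only: allocate the monitor
 * context once, reuse it for every read in the batch, then free it.
 * On x86 the alloc hook just does might_sleep() and returns NULL; on
 * MPAM it may sleep while a contended hardware monitor is claimed.
 */
static void read_llc_occupancy(struct rdt_resource *r, struct rdt_domain *d)
{
	void *ctx;
	u64 val;
	u32 rmid;

	ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); /* may sleep */
	if (IS_ERR(ctx))
		return;

	for (rmid = 0; rmid < r->num_rmid; rmid++) {
		/* closid 0: this argument is unused on x86 */
		if (!resctrl_arch_rmid_read(r, d, 0, rmid,
					    QOS_L3_OCCUP_EVENT_ID, &val, ctx))
			pr_info("rmid %u: %llu bytes\n", rmid, val);
	}

	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, ctx);
}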
commit e557999f80 (parent 6fde1424f2)
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -136,6 +136,17 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
 	return rmid;
 }
 
+/* x86 can always read an rmid, nothing needs allocating */
+struct rdt_resource;
+static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid)
+{
+	might_sleep();
+	return NULL;
+};
+
+static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
+					     void *ctx) { };
+
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -546,6 +546,11 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 	rr->d = d;
 	rr->val = 0;
 	rr->first = first;
+	rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid);
+	if (IS_ERR(rr->arch_mon_ctx)) {
+		rr->err = -EINVAL;
+		return;
+	}
 
 	cpu = cpumask_any_housekeeping(&d->cpu_mask);
 
@@ -559,6 +564,8 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
 		smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
 	else
 		smp_call_on_cpu(cpu, smp_mon_event_count, rr, false);
+
+	resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx);
 }
 
 int rdtgroup_mondata_show(struct seq_file *m, void *arg)
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -137,6 +137,7 @@ struct rmid_read {
 	bool first;
 	int err;
 	u64 val;
+	void *arch_mon_ctx;
 };
 
 extern bool rdt_alloc_capable;
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -269,7 +269,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
 
 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
-			   u64 *val)
+			   u64 *val, void *ignored)
 {
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
@@ -324,9 +324,17 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
 	struct rmid_entry *entry;
 	u32 idx, cur_idx = 1;
+	void *arch_mon_ctx;
 	bool rmid_dirty;
 	u64 val = 0;
 
+	arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
+	if (IS_ERR(arch_mon_ctx)) {
+		pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+				    PTR_ERR(arch_mon_ctx));
+		return;
+	}
+
 	/*
 	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
 	 * are marked as busy for occupancy < threshold. If the occupancy
@@ -340,7 +348,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
 
 		entry = __rmid_entry(idx);
 		if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
-					   QOS_L3_OCCUP_EVENT_ID, &val)) {
+					   QOS_L3_OCCUP_EVENT_ID, &val,
+					   arch_mon_ctx)) {
 			rmid_dirty = true;
 		} else {
 			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
@@ -353,6 +362,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
 		}
 		cur_idx = idx + 1;
 	}
+
+	resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx);
 }
 
 bool has_busy_rmid(struct rdt_domain *d)
@@ -533,7 +544,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
 	}
 
 	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
-					 &tval);
+					 &tval, rr->arch_mon_ctx);
 	if (rr->err)
 		return rr->err;
 
@@ -722,11 +733,27 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
 	if (is_mbm_total_enabled()) {
 		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
 		rr.val = 0;
+		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+		if (IS_ERR(rr.arch_mon_ctx)) {
+			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+					    PTR_ERR(rr.arch_mon_ctx));
+			return;
+		}
+
 		__mon_event_count(closid, rmid, &rr);
+
+		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
 	}
 	if (is_mbm_local_enabled()) {
 		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
 		rr.val = 0;
+		rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid);
+		if (IS_ERR(rr.arch_mon_ctx)) {
+			pr_warn_ratelimited("Failed to allocate monitor context: %ld",
+					    PTR_ERR(rr.arch_mon_ctx));
+			return;
+		}
+
 		__mon_event_count(closid, rmid, &rr);
 
 		/*
@@ -736,6 +763,8 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
 		 */
 		if (is_mba_sc(NULL))
 			mbm_bw_count(closid, rmid, &rr);
+
+		resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx);
 	}
 }
 
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -235,6 +235,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
  * @rmid:		rmid of the counter to read.
  * @eventid:		eventid to read, e.g. L3 occupancy.
  * @val:		result of the counter read in bytes.
+ * @arch_mon_ctx:	An architecture specific value from
+ *			resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
+ *			the hardware monitor allocated for this read request.
  *
  * Some architectures need to sleep when first programming some of the counters.
  * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
@@ -248,7 +251,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
  */
 int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 			   u32 closid, u32 rmid, enum resctrl_event_id eventid,
-			   u64 *val);
+			   u64 *val, void *arch_mon_ctx);
 
 /**
  * resctrl_arch_rmid_read_context_check() - warn about invalid contexts