1

cgroup/cpuset: remove use_parent_ecpus of cpuset

use_parent_ecpus is used to track whether the children are using the
parent's effective_cpus. When a parent's effective_cpus is changed
due to changes in a child partition's effective_xcpus, any child
using the parent's effective_cpus must call update_cpumasks_hier. However,
if a child is not a valid partition, it is sufficient to determine
whether to call update_cpumasks_hier based on whether the child's
effective_cpus is going to change. To make the code more succinct,
it is suggested to remove use_parent_ecpus.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
Chen Ridong 2024-08-20 03:01:26 +00:00 committed by Tejun Heo
parent 9414f68d45
commit 3c2acae888

View File

@ -185,12 +185,6 @@ struct cpuset {
/* partition root state */ /* partition root state */
int partition_root_state; int partition_root_state;
/*
* Default hierarchy only:
* use_parent_ecpus - set if using parent's effective_cpus
*/
int use_parent_ecpus;
/* /*
* number of SCHED_DEADLINE tasks attached to this cpuset, so that we * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
* know when to rebuild associated root domain bandwidth information. * know when to rebuild associated root domain bandwidth information.
@ -1505,11 +1499,8 @@ static void reset_partition_data(struct cpuset *cs)
if (is_cpu_exclusive(cs)) if (is_cpu_exclusive(cs))
clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
} }
if (!cpumask_and(cs->effective_cpus, if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed))
parent->effective_cpus, cs->cpus_allowed)) {
cs->use_parent_ecpus = true;
cpumask_copy(cs->effective_cpus, parent->effective_cpus); cpumask_copy(cs->effective_cpus, parent->effective_cpus);
}
} }
/* /*
@ -1683,8 +1674,6 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
spin_lock_irq(&callback_lock); spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
list_add(&cs->remote_sibling, &remote_children); list_add(&cs->remote_sibling, &remote_children);
if (cs->use_parent_ecpus)
cs->use_parent_ecpus = false;
spin_unlock_irq(&callback_lock); spin_unlock_irq(&callback_lock);
update_unbound_workqueue_cpumask(isolcpus_updated); update_unbound_workqueue_cpumask(isolcpus_updated);
@ -2309,13 +2298,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
* it is a partition root that has explicitly distributed * it is a partition root that has explicitly distributed
* out all its CPUs. * out all its CPUs.
*/ */
if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) { if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus))
cpumask_copy(tmp->new_cpus, parent->effective_cpus); cpumask_copy(tmp->new_cpus, parent->effective_cpus);
if (!cp->use_parent_ecpus)
cp->use_parent_ecpus = true;
} else if (cp->use_parent_ecpus) {
cp->use_parent_ecpus = false;
}
if (remote) if (remote)
goto get_css; goto get_css;
@ -2452,8 +2436,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
* Check all its siblings and call update_cpumasks_hier() * Check all its siblings and call update_cpumasks_hier()
* if their effective_cpus will need to be changed. * if their effective_cpus will need to be changed.
* *
* With the addition of effective_xcpus which is a subset of * It is possible a change in parent's effective_cpus
* cpus_allowed. It is possible a change in parent's effective_cpus
* due to a change in a child partition's effective_xcpus will impact * due to a change in a child partition's effective_xcpus will impact
* its siblings even if they do not inherit parent's effective_cpus * its siblings even if they do not inherit parent's effective_cpus
* directly. * directly.
@ -2467,8 +2450,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
cpuset_for_each_child(sibling, pos_css, parent) { cpuset_for_each_child(sibling, pos_css, parent) {
if (sibling == cs) if (sibling == cs)
continue; continue;
if (!sibling->use_parent_ecpus && if (!is_partition_valid(sibling)) {
!is_partition_valid(sibling)) {
compute_effective_cpumask(tmp->new_cpus, sibling, compute_effective_cpumask(tmp->new_cpus, sibling,
parent); parent);
if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
@ -4128,7 +4110,6 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
if (is_in_v2_mode()) { if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus); cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems; cs->effective_mems = parent->effective_mems;
cs->use_parent_ecpus = true;
} }
spin_unlock_irq(&callback_lock); spin_unlock_irq(&callback_lock);
@ -4194,9 +4175,6 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
is_sched_load_balance(cs)) is_sched_load_balance(cs))
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
if (cs->use_parent_ecpus)
cs->use_parent_ecpus = false;
cpuset_dec(); cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags); clear_bit(CS_ONLINE, &cs->flags);