cgroup/cpuset: move validate_change_legacy to cpuset-v1.c
The validate_change_legacy function is used for v1, move it to cpuset-v1.c. And the two macros 'cpuset_for_each_child' and 'cpuset_for_each_descendant_pre' are common for v1 and v2, move them to cpuset-internal.h. Signed-off-by: Chen Ridong <chenridong@huawei.com> Acked-by: Waiman Long <longman@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
23ca5237e3
commit
be126b5b1b
@ -238,6 +238,34 @@ static inline int is_spread_slab(const struct cpuset *cs)
|
||||
return test_bit(CS_SPREAD_SLAB, &cs->flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuset_for_each_child - traverse online children of a cpuset
|
||||
* @child_cs: loop cursor pointing to the current child
|
||||
* @pos_css: used for iteration
|
||||
* @parent_cs: target cpuset to walk children of
|
||||
*
|
||||
* Walk @child_cs through the online children of @parent_cs. Must be used
|
||||
* with RCU read locked.
|
||||
*/
|
||||
#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
|
||||
css_for_each_child((pos_css), &(parent_cs)->css) \
|
||||
if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
|
||||
|
||||
/**
|
||||
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
|
||||
* @des_cs: loop cursor pointing to the current descendant
|
||||
* @pos_css: used for iteration
|
||||
* @root_cs: target cpuset to walk ancestor of
|
||||
*
|
||||
* Walk @des_cs through the online descendants of @root_cs. Must be used
|
||||
* with RCU read locked. The caller may modify @pos_css by calling
|
||||
* css_rightmost_descendant() to skip subtree. @root_cs is included in the
|
||||
* iteration and the first node to be visited.
|
||||
*/
|
||||
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
|
||||
css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
|
||||
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
|
||||
|
||||
void rebuild_sched_domains_locked(void);
|
||||
void callback_lock_irq(void);
|
||||
void callback_unlock_irq(void);
|
||||
@ -258,5 +286,6 @@ void update_tasks_flags(struct cpuset *cs);
|
||||
void hotplug_update_tasks_legacy(struct cpuset *cs,
|
||||
struct cpumask *new_cpus, nodemask_t *new_mems,
|
||||
bool cpus_updated, bool mems_updated);
|
||||
int validate_change_legacy(struct cpuset *cur, struct cpuset *trial);
|
||||
|
||||
#endif /* __CPUSET_INTERNAL_H */
|
||||
|
@ -327,3 +327,48 @@ void hotplug_update_tasks_legacy(struct cpuset *cs,
|
||||
schedule_work(&s->work);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
|
||||
*
|
||||
* One cpuset is a subset of another if all its allowed CPUs and
|
||||
* Memory Nodes are a subset of the other, and its exclusive flags
|
||||
* are only set if the other's are set. Call holding cpuset_mutex.
|
||||
*/
|
||||
|
||||
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
|
||||
{
|
||||
return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
|
||||
nodes_subset(p->mems_allowed, q->mems_allowed) &&
|
||||
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
|
||||
is_mem_exclusive(p) <= is_mem_exclusive(q);
|
||||
}
|
||||
|
||||
/*
|
||||
* validate_change_legacy() - Validate conditions specific to legacy (v1)
|
||||
* behavior.
|
||||
*/
|
||||
int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
|
||||
{
|
||||
struct cgroup_subsys_state *css;
|
||||
struct cpuset *c, *par;
|
||||
int ret;
|
||||
|
||||
WARN_ON_ONCE(!rcu_read_lock_held());
|
||||
|
||||
/* Each of our child cpusets must be a subset of us */
|
||||
ret = -EBUSY;
|
||||
cpuset_for_each_child(c, css, cur)
|
||||
if (!is_cpuset_subset(c, trial))
|
||||
goto out;
|
||||
|
||||
/* On legacy hierarchy, we must be a subset of our parent cpuset. */
|
||||
ret = -EACCES;
|
||||
par = parent_cs(cur);
|
||||
if (par && !is_cpuset_subset(trial, par))
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
@ -186,34 +186,6 @@ static struct cpuset top_cpuset = {
|
||||
.remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
|
||||
};
|
||||
|
||||
/**
|
||||
* cpuset_for_each_child - traverse online children of a cpuset
|
||||
* @child_cs: loop cursor pointing to the current child
|
||||
* @pos_css: used for iteration
|
||||
* @parent_cs: target cpuset to walk children of
|
||||
*
|
||||
* Walk @child_cs through the online children of @parent_cs. Must be used
|
||||
* with RCU read locked.
|
||||
*/
|
||||
#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
|
||||
css_for_each_child((pos_css), &(parent_cs)->css) \
|
||||
if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
|
||||
|
||||
/**
|
||||
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
|
||||
* @des_cs: loop cursor pointing to the current descendant
|
||||
* @pos_css: used for iteration
|
||||
* @root_cs: target cpuset to walk ancestor of
|
||||
*
|
||||
* Walk @des_cs through the online descendants of @root_cs. Must be used
|
||||
* with RCU read locked. The caller may modify @pos_css by calling
|
||||
* css_rightmost_descendant() to skip subtree. @root_cs is included in the
|
||||
* iteration and the first node to be visited.
|
||||
*/
|
||||
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
|
||||
css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
|
||||
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
|
||||
|
||||
/*
|
||||
* There are two global locks guarding cpuset structures - cpuset_mutex and
|
||||
* callback_lock. We also require taking task_lock() when dereferencing a
|
||||
@ -409,22 +381,6 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
|
||||
nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
|
||||
}
|
||||
|
||||
/*
|
||||
* is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
|
||||
*
|
||||
* One cpuset is a subset of another if all its allowed CPUs and
|
||||
* Memory Nodes are a subset of the other, and its exclusive flags
|
||||
* are only set if the other's are set. Call holding cpuset_mutex.
|
||||
*/
|
||||
|
||||
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
|
||||
{
|
||||
return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
|
||||
nodes_subset(p->mems_allowed, q->mems_allowed) &&
|
||||
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
|
||||
is_mem_exclusive(p) <= is_mem_exclusive(q);
|
||||
}
|
||||
|
||||
/**
|
||||
* alloc_cpumasks - allocate three cpumasks for cpuset
|
||||
* @cs: the cpuset that have cpumasks to be allocated.
|
||||
@ -555,35 +511,6 @@ static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* validate_change_legacy() - Validate conditions specific to legacy (v1)
|
||||
* behavior.
|
||||
*/
|
||||
static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
|
||||
{
|
||||
struct cgroup_subsys_state *css;
|
||||
struct cpuset *c, *par;
|
||||
int ret;
|
||||
|
||||
WARN_ON_ONCE(!rcu_read_lock_held());
|
||||
|
||||
/* Each of our child cpusets must be a subset of us */
|
||||
ret = -EBUSY;
|
||||
cpuset_for_each_child(c, css, cur)
|
||||
if (!is_cpuset_subset(c, trial))
|
||||
goto out;
|
||||
|
||||
/* On legacy hierarchy, we must be a subset of our parent cpuset. */
|
||||
ret = -EACCES;
|
||||
par = parent_cs(cur);
|
||||
if (par && !is_cpuset_subset(trial, par))
|
||||
goto out;
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* validate_change() - Used to validate that any proposed cpuset change
|
||||
* follows the structural rules for cpusets.
|
||||
|
Loading…
Reference in New Issue
Block a user