cgroup/cpuset: rename functions shared between v1 and v2
Some function names declared in cpuset-internal.h are generic. To avoid
conflicting with other symbols of the same name, rename these functions
with a cpuset_/cpuset1_ prefix to make them unique to cpuset.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 381b53c3b5 (parent b0ced9d378)
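The convention the patch establishes: helpers shared by the v1 and v2 code
paths take a cpuset_ prefix, while helpers used only by the legacy (v1)
hierarchy take a cpuset1_ prefix. A minimal standalone sketch of that split
(illustrative names and bodies only, not the kernel code):

	#include <stdio.h>

	/* cpuset_*: shared helper, callable from both cpuset.c and cpuset-v1.c */
	static void cpuset_shared_helper(void)
	{
		printf("shared between v1 and v2\n");
	}

	/* cpuset1_*: legacy-only helper, would live in cpuset-v1.c */
	static void cpuset1_legacy_helper(void)
	{
		/* v1 code may still call into the shared layer */
		cpuset_shared_helper();
		printf("v1 hierarchy only\n");
	}

	int main(void)
	{
		cpuset1_legacy_helper();
		return 0;
	}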
--- a/kernel/cgroup/cpuset-internal.h
+++ b/kernel/cgroup/cpuset-internal.h
@@ -267,11 +267,11 @@ static inline int is_spread_slab(const struct cpuset *cs)
 		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
 
 void rebuild_sched_domains_locked(void);
-void callback_lock_irq(void);
-void callback_unlock_irq(void);
-void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
-void update_tasks_nodemask(struct cpuset *cs);
-int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
+void cpuset_callback_lock_irq(void);
+void cpuset_callback_unlock_irq(void);
+void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
+void cpuset_update_tasks_nodemask(struct cpuset *cs);
+int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
 ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 			     char *buf, size_t nbytes, loff_t off);
 int cpuset_common_seq_show(struct seq_file *sf, void *v);
@@ -279,14 +279,14 @@ int cpuset_common_seq_show(struct seq_file *sf, void *v);
 /*
  * cpuset-v1.c
  */
-extern struct cftype legacy_files[];
+extern struct cftype cpuset1_files[];
 void fmeter_init(struct fmeter *fmp);
-void cpuset_update_task_spread_flags(struct cpuset *cs,
+void cpuset1_update_task_spread_flags(struct cpuset *cs,
 					struct task_struct *tsk);
-void update_tasks_flags(struct cpuset *cs);
-void hotplug_update_tasks_legacy(struct cpuset *cs,
+void cpuset1_update_tasks_flags(struct cpuset *cs);
+void cpuset1_hotplug_update_tasks(struct cpuset *cs,
 			struct cpumask *new_cpus, nodemask_t *new_mems,
 			bool cpus_updated, bool mems_updated);
-int validate_change_legacy(struct cpuset *cur, struct cpuset *trial);
+int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
 
 #endif /* __CPUSET_INTERNAL_H */
--- a/kernel/cgroup/cpuset-v1.c
+++ b/kernel/cgroup/cpuset-v1.c
@@ -209,7 +209,7 @@ static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
  * Call with callback_lock or cpuset_mutex held. The check can be skipped
  * if on default hierarchy.
  */
-void cpuset_update_task_spread_flags(struct cpuset *cs,
+void cpuset1_update_task_spread_flags(struct cpuset *cs,
 					struct task_struct *tsk)
 {
 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
@@ -227,21 +227,21 @@ void cpuset_update_task_spread_flags(struct cpuset *cs,
 }
 
 /**
- * update_tasks_flags - update the spread flags of tasks in the cpuset.
+ * cpuset1_update_tasks_flags - update the spread flags of tasks in the cpuset.
  * @cs: the cpuset in which each task's spread flags needs to be changed
  *
  * Iterate through each task of @cs updating its spread flags. As this
  * function is called with cpuset_mutex held, cpuset membership stays
  * stable.
  */
-void update_tasks_flags(struct cpuset *cs)
+void cpuset1_update_tasks_flags(struct cpuset *cs)
 {
 	struct css_task_iter it;
 	struct task_struct *task;
 
 	css_task_iter_start(&cs->css, 0, &it);
 	while ((task = css_task_iter_next(&it)))
-		cpuset_update_task_spread_flags(cs, task);
+		cpuset1_update_task_spread_flags(cs, task);
 	css_task_iter_end(&it);
 }
 
@@ -282,27 +282,27 @@ static void cpuset_migrate_tasks_workfn(struct work_struct *work)
 	kfree(s);
 }
 
-void hotplug_update_tasks_legacy(struct cpuset *cs,
+void cpuset1_hotplug_update_tasks(struct cpuset *cs,
 			struct cpumask *new_cpus, nodemask_t *new_mems,
 			bool cpus_updated, bool mems_updated)
 {
 	bool is_empty;
 
-	callback_lock_irq();
+	cpuset_callback_lock_irq();
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	callback_unlock_irq();
+	cpuset_callback_unlock_irq();
 
 	/*
-	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+	 * Don't call cpuset_update_tasks_cpumask() if the cpuset becomes empty,
 	 * as the tasks will be migrated to an ancestor.
 	 */
 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
-		update_tasks_cpumask(cs, new_cpus);
+		cpuset_update_tasks_cpumask(cs, new_cpus);
 	if (mems_updated && !nodes_empty(cs->mems_allowed))
-		update_tasks_nodemask(cs);
+		cpuset_update_tasks_nodemask(cs);
 
 	is_empty = cpumask_empty(cs->cpus_allowed) ||
 		   nodes_empty(cs->mems_allowed);
@@ -345,10 +345,10 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 }
 
 /*
- * validate_change_legacy() - Validate conditions specific to legacy (v1)
+ * cpuset1_validate_change() - Validate conditions specific to legacy (v1)
  * behavior.
 */
-int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
+int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
 {
 	struct cgroup_subsys_state *css;
 	struct cpuset *c, *par;
@@ -421,28 +421,28 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 
 	switch (type) {
 	case FILE_CPU_EXCLUSIVE:
-		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
+		retval = cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, val);
 		break;
 	case FILE_MEM_EXCLUSIVE:
-		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
+		retval = cpuset_update_flag(CS_MEM_EXCLUSIVE, cs, val);
 		break;
 	case FILE_MEM_HARDWALL:
-		retval = update_flag(CS_MEM_HARDWALL, cs, val);
+		retval = cpuset_update_flag(CS_MEM_HARDWALL, cs, val);
 		break;
 	case FILE_SCHED_LOAD_BALANCE:
-		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
+		retval = cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
 		break;
 	case FILE_MEMORY_MIGRATE:
-		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
+		retval = cpuset_update_flag(CS_MEMORY_MIGRATE, cs, val);
 		break;
 	case FILE_MEMORY_PRESSURE_ENABLED:
 		cpuset_memory_pressure_enabled = !!val;
 		break;
 	case FILE_SPREAD_PAGE:
-		retval = update_flag(CS_SPREAD_PAGE, cs, val);
+		retval = cpuset_update_flag(CS_SPREAD_PAGE, cs, val);
 		break;
 	case FILE_SPREAD_SLAB:
-		retval = update_flag(CS_SPREAD_SLAB, cs, val);
+		retval = cpuset_update_flag(CS_SPREAD_SLAB, cs, val);
 		break;
 	default:
 		retval = -EINVAL;
@@ -458,7 +458,7 @@ out_unlock:
  * for the common functions, 'private' gives the type of file
 */
 
-struct cftype legacy_files[] = {
+struct cftype cpuset1_files[] = {
 	{
 		.name = "cpus",
 		.seq_show = cpuset_common_seq_show,
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -239,12 +239,12 @@ void cpuset_unlock(void)
 
 static DEFINE_SPINLOCK(callback_lock);
 
-void callback_lock_irq(void)
+void cpuset_callback_lock_irq(void)
 {
 	spin_lock_irq(&callback_lock);
 }
 
-void callback_unlock_irq(void)
+void cpuset_callback_unlock_irq(void)
 {
 	spin_unlock_irq(&callback_lock);
 }
@@ -540,7 +540,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	rcu_read_lock();
 
 	if (!is_in_v2_mode())
-		ret = validate_change_legacy(cur, trial);
+		ret = cpuset1_validate_change(cur, trial);
 	if (ret)
 		goto out;
 
@@ -1053,7 +1053,7 @@ void rebuild_sched_domains(void)
 }
 
 /**
- * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
+ * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  * @new_cpus: the temp variable for the new effective_cpus mask
  *
@@ -1063,7 +1063,7 @@ void rebuild_sched_domains(void)
 * is used instead of effective_cpus to make sure all offline CPUs are also
 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
 */
-void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
+void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
 {
 	struct css_task_iter it;
 	struct task_struct *task;
@@ -1126,11 +1126,11 @@ static int update_partition_exclusive(struct cpuset *cs, int new_prs)
 	bool exclusive = (new_prs > PRS_MEMBER);
 
 	if (exclusive && !is_cpu_exclusive(cs)) {
-		if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
+		if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
 			return PERR_NOTEXCL;
 	} else if (!exclusive && is_cpu_exclusive(cs)) {
 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
-		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
+		cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
 	}
 	return 0;
 }
@@ -1380,7 +1380,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
 	 */
-	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
 	return 0;
 }
@@ -1416,7 +1416,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
 	 */
-	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
 }
 
@@ -1468,7 +1468,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 	/*
 	 * Proprogate changes in top_cpuset's effective_cpus down the hierarchy.
 	 */
-	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
+	cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
 	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
 	return;
 
@@ -1840,7 +1840,7 @@ write_error:
 	update_partition_exclusive(cs, new_prs);
 
 	if (adding || deleting) {
-		update_tasks_cpumask(parent, tmp->addmask);
+		cpuset_update_tasks_cpumask(parent, tmp->addmask);
 		update_sibling_cpumasks(parent, cs, tmp);
 	}
 
@@ -2023,7 +2023,7 @@ update_parent_effective:
 		/*
 		 * update_parent_effective_cpumask() should have been called
 		 * for cs already in update_cpumask(). We should also call
-		 * update_tasks_cpumask() again for tasks in the parent
+		 * cpuset_update_tasks_cpumask() again for tasks in the parent
 		 * cpuset if the parent's effective_cpus changes.
 		 */
 		if ((cp != cs) && old_prs) {
@@ -2080,7 +2080,7 @@ get_css:
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
 
-		update_tasks_cpumask(cp, cp->effective_cpus);
+		cpuset_update_tasks_cpumask(cp, cp->effective_cpus);
 
 		/*
 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
@@ -2507,14 +2507,14 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 static void *cpuset_being_rebound;
 
 /**
- * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
+ * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's. As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
-void update_tasks_nodemask(struct cpuset *cs)
+void cpuset_update_tasks_nodemask(struct cpuset *cs)
 {
 	static nodemask_t newmems;	/* protected by cpuset_mutex */
 	struct css_task_iter it;
@@ -2612,7 +2612,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 		WARN_ON(!is_in_v2_mode() &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
 
-		update_tasks_nodemask(cp);
+		cpuset_update_tasks_nodemask(cp);
 
 		rcu_read_lock();
 		css_put(&cp->css);
@@ -2699,7 +2699,7 @@ bool current_cpuset_is_being_rebound(void)
 }
 
 /*
- * update_flag - read a 0 or a 1 in a file and update associated flag
+ * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (see cpuset_flagbits_t)
 * cs:	the cpuset to update
 * turning_on: whether the flag is being set or cleared
@@ -2707,7 +2707,7 @@ bool current_cpuset_is_being_rebound(void)
 * Call with cpuset_mutex held.
 */
 
-int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		int turning_on)
 {
 	struct cpuset *trialcs;
@@ -2743,7 +2743,7 @@ int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		rebuild_sched_domains_locked();
 
 	if (spread_flag_changed)
-		update_tasks_flags(cs);
+		cpuset1_update_tasks_flags(cs);
 out:
 	free_cpuset(trialcs);
 	return err;
@@ -3008,7 +3008,7 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
 	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
 
 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
-	cpuset_update_task_spread_flags(cs, task);
+	cpuset1_update_task_spread_flags(cs, task);
 }
 
 static void cpuset_attach(struct cgroup_taskset *tset)
@@ -3484,7 +3484,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 
 	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
 	    is_sched_load_balance(cs))
-		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	cpuset_dec();
 	clear_bit(CS_ONLINE, &cs->flags);
@@ -3623,7 +3623,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
 	.can_fork	= cpuset_can_fork,
 	.cancel_fork	= cpuset_cancel_fork,
 	.fork		= cpuset_fork,
-	.legacy_cftypes	= legacy_files,
+	.legacy_cftypes	= cpuset1_files,
 	.dfl_cftypes	= dfl_files,
 	.early_init	= true,
 	.threaded	= true,
@@ -3683,9 +3683,9 @@ hotplug_update_tasks(struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
-		update_tasks_cpumask(cs, new_cpus);
+		cpuset_update_tasks_cpumask(cs, new_cpus);
 	if (mems_updated)
-		update_tasks_nodemask(cs);
+		cpuset_update_tasks_nodemask(cs);
 }
 
 void cpuset_force_rebuild(void)
@@ -3786,7 +3786,7 @@ update_tasks:
 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
 				     cpus_updated, mems_updated);
 	else
-		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+		cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems,
 					    cpus_updated, mems_updated);
 
 unlock:
@@ -3871,7 +3871,7 @@ static void cpuset_handle_hotplug(void)
 	top_cpuset.mems_allowed = new_mems;
 	top_cpuset.effective_mems = new_mems;
 	spin_unlock_irq(&callback_lock);
-	update_tasks_nodemask(&top_cpuset);
+	cpuset_update_tasks_nodemask(&top_cpuset);
 }
 
 	mutex_unlock(&cpuset_mutex);
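For orientation, the dispatch pattern the renamed helpers plug into, as seen
in the validate_change() hunk above: shared code calls the cpuset1_* helper
only when running off the default (v2) hierarchy. A standalone sketch with
assumed, heavily simplified stand-ins (the real functions do much more):

	#include <stddef.h>
	#include <stdbool.h>

	struct cpuset;	/* opaque for this sketch */

	/* stand-in: the real check reads cgroup hierarchy state */
	static bool is_in_v2_mode(void)
	{
		return true;
	}

	/* stand-in for the v1-only checks in cpuset-v1.c */
	static int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial)
	{
		(void)cur;
		(void)trial;
		return 0;	/* legacy-only validation would live here */
	}

	/* shared validate_change(): v1-specific checks run only in v1 mode */
	static int validate_change(struct cpuset *cur, struct cpuset *trial)
	{
		int ret = 0;

		if (!is_in_v2_mode())
			ret = cpuset1_validate_change(cur, trial);
		return ret;
	}

	int main(void)
	{
		return validate_change(NULL, NULL);
	}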