cgroup/cpuset: move relax_domain_level to cpuset-v1.c
Setting the relax domain level is not supported in cpuset v2, so move the corresponding code into cpuset-v1.c.

'cpuset_write_s64' and 'cpuset_read_s64' are only used for setting the domain level, so move them to cpuset-v1.c as well. For now they are exposed to cpuset.c; once the cpuset legacy interface files have been moved to cpuset-v1.c, they can be made static. 'rebuild_sched_domains_locked' is exposed to cpuset-v1.c.

The one change from the original code is that the 'cpuset_lock' and 'cpuset_unlock' functions are now used to lock and unlock cpuset_mutex.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 49434094ef · commit 047b830974
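For context, these s64 handlers back the v1-only cpuset.sched_relax_domain_level control file. A rough sketch of how a legacy cftype entry wires up to them is shown below; the table name and the terminator entry are assumptions for illustration, only the handler names and FILE_SCHED_RELAX_DOMAIN_LEVEL come from this patch:

/* Sketch only: the actual legacy file table in the cpuset code is not
 * part of this diff, so its name and surrounding entries are assumed.
 */
static struct cftype cpuset1_files_sketch[] = {
        {
                .name           = "sched_relax_domain_level",
                .read_s64       = cpuset_read_s64,
                .write_s64      = cpuset_write_s64,
                .private        = FILE_SCHED_RELAX_DOMAIN_LEVEL,
        },
        { }     /* terminator */
};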
kernel/cgroup/cpuset-internal.h
@@ -238,11 +238,15 @@ static inline int is_spread_slab(const struct cpuset *cs)
 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 }
 
+void rebuild_sched_domains_locked(void);
+
 /*
  * cpuset-v1.c
  */
 
 void fmeter_init(struct fmeter *fmp);
 int fmeter_getrate(struct fmeter *fmp);
+int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+		     s64 val);
+s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft);
 
 #endif /* __CPUSET_INTERNAL_H */
kernel/cgroup/cpuset-v1.c
@@ -135,3 +135,62 @@ void __cpuset_memory_pressure_bump(void)
 	fmeter_markevent(&task_cs(current)->fmeter);
 	rcu_read_unlock();
 }
+
+static int update_relax_domain_level(struct cpuset *cs, s64 val)
+{
+#ifdef CONFIG_SMP
+	if (val < -1 || val > sched_domain_level_max + 1)
+		return -EINVAL;
+#endif
+
+	if (val != cs->relax_domain_level) {
+		cs->relax_domain_level = val;
+		if (!cpumask_empty(cs->cpus_allowed) &&
+		    is_sched_load_balance(cs))
+			rebuild_sched_domains_locked();
+	}
+
+	return 0;
+}
+
+int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+		     s64 val)
+{
+	struct cpuset *cs = css_cs(css);
+	cpuset_filetype_t type = cft->private;
+	int retval = -ENODEV;
+
+	cpus_read_lock();
+	cpuset_lock();
+	if (!is_cpuset_online(cs))
+		goto out_unlock;
+
+	switch (type) {
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		retval = update_relax_domain_level(cs, val);
+		break;
+	default:
+		retval = -EINVAL;
+		break;
+	}
+out_unlock:
+	cpuset_unlock();
+	cpus_read_unlock();
+	return retval;
+}
+
+s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+	struct cpuset *cs = css_cs(css);
+	cpuset_filetype_t type = cft->private;
+
+	switch (type) {
+	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+		return cs->relax_domain_level;
+	default:
+		BUG();
+	}
+
+	/* Unreachable but makes gcc happy */
+	return 0;
+}
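One thing worth noting in the hunk above: where the original cpuset.c code took cpuset_mutex directly, the moved code calls cpuset_lock() and cpuset_unlock(), since cpuset_mutex stays private to cpuset.c. A minimal sketch of what such wrappers amount to, assuming they do nothing beyond taking and releasing that mutex:

/* Minimal sketch, assuming cpuset_lock()/cpuset_unlock() are plain
 * wrappers around the cpuset_mutex that remains private to cpuset.c.
 */
void cpuset_lock(void)
{
        mutex_lock(&cpuset_mutex);
}

void cpuset_unlock(void)
{
        mutex_unlock(&cpuset_mutex);
}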
kernel/cgroup/cpuset.c
@@ -1075,7 +1075,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  *
  * Call with cpuset_mutex held. Takes cpus_read_lock().
  */
-static void rebuild_sched_domains_locked(void)
+void rebuild_sched_domains_locked(void)
 {
 	struct cgroup_subsys_state *pos_css;
 	struct sched_domain_attr *attr;
@@ -1127,7 +1127,7 @@ static void rebuild_sched_domains_locked(void)
 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
 }
 #else /* !CONFIG_SMP */
-static void rebuild_sched_domains_locked(void)
+void rebuild_sched_domains_locked(void)
 {
 }
 #endif /* CONFIG_SMP */
@@ -2794,23 +2794,6 @@ bool current_cpuset_is_being_rebound(void)
 	return ret;
 }
 
-static int update_relax_domain_level(struct cpuset *cs, s64 val)
-{
-#ifdef CONFIG_SMP
-	if (val < -1 || val > sched_domain_level_max + 1)
-		return -EINVAL;
-#endif
-
-	if (val != cs->relax_domain_level) {
-		cs->relax_domain_level = val;
-		if (!cpumask_empty(cs->cpus_allowed) &&
-		    is_sched_load_balance(cs))
-			rebuild_sched_domains_locked();
-	}
-
-	return 0;
-}
-
 /**
  * update_tasks_flags - update the spread flags of tasks in the cpuset.
  * @cs: the cpuset in which each task's spread flags needs to be changed
@@ -3273,32 +3256,6 @@ out_unlock:
 	return retval;
 }
 
-static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
-			    s64 val)
-{
-	struct cpuset *cs = css_cs(css);
-	cpuset_filetype_t type = cft->private;
-	int retval = -ENODEV;
-
-	cpus_read_lock();
-	mutex_lock(&cpuset_mutex);
-	if (!is_cpuset_online(cs))
-		goto out_unlock;
-
-	switch (type) {
-	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
-		retval = update_relax_domain_level(cs, val);
-		break;
-	default:
-		retval = -EINVAL;
-		break;
-	}
-out_unlock:
-	mutex_unlock(&cpuset_mutex);
-	cpus_read_unlock();
-	return retval;
-}
-
 /*
  * Common handling for a write to a "cpus" or "mems" file.
  */
@@ -3449,21 +3406,6 @@ static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
 	return 0;
 }
 
-static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
-{
-	struct cpuset *cs = css_cs(css);
-	cpuset_filetype_t type = cft->private;
-	switch (type) {
-	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
-		return cs->relax_domain_level;
-	default:
-		BUG();
-	}
-
-	/* Unreachable but makes gcc happy */
-	return 0;
-}
-
 static int sched_partition_show(struct seq_file *seq, void *v)
 {
 	struct cpuset *cs = css_cs(seq_css(seq));