
sched/uclamp: Handle delayed dequeue

Delayed dequeue has tasks sit around on the runqueue that are not
actually runnable -- specifically, they will be dequeued the moment
they get picked.
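
Schematically (a simplified sketch, not verbatim kernel code):

	/* task blocks, but is kept on the runqueue and only marked: */
	p->se.sched_delayed = 1;

	/* it stays queued yet is not runnable; only when the scheduler
	 * later picks it does the real dequeue happen */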

One side-effect is that such a task can get migrated, which leads to a
'nested' dequeue_task() scenario that messes up uclamp if we don't
take care.

Notably, dequeue_task(DEQUEUE_SLEEP) can 'fail' and keep the task on
the runqueue. This however will have removed the task from uclamp --
per uclamp_rq_dec() in dequeue_task(). So far so good.

However, if at that point the task gets migrated -- or nice adjusted
or any of a myriad of operations that does a dequeue-enqueue cycle --
we'll pass through dequeue_task()/enqueue_task() again. Without
modification this will lead to a double decrement for uclamp, which is
wrong.
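
Schematically, the broken flow (illustrative trace, simplified):

	dequeue_task(p, DEQUEUE_SLEEP)
		uclamp_rq_dec(rq, p)		/* first decrement */
		p->sched_class->dequeue_task()	/* 'fails'; task stays queued,
						   marked ->sched_delayed */

	/* a migration / nice adjustment then cycles the task again: */
	dequeue_task(p, flags)
		uclamp_rq_dec(rq, p)		/* second decrement -- wrong */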

Reported-by: Luis Machado <luis.machado@arm.com>
Reported-by: Hongyan Xia <hongyan.xia2@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Tested-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/20240727105029.315205425@infradead.org
commit dfa0a574cb (parent abc158c82a)
Author: Peter Zijlstra <peterz@infradead.org>
Date:   2024-06-05 12:09:11 +02:00

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1691,6 +1691,9 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
 	if (unlikely(!p->sched_class->uclamp_enabled))
 		return;
 
+	if (p->se.sched_delayed)
+		return;
+
 	for_each_clamp_id(clamp_id)
 		uclamp_rq_inc_id(rq, p, clamp_id);
 
@@ -1715,6 +1718,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
 	if (unlikely(!p->sched_class->uclamp_enabled))
 		return;
 
+	if (p->se.sched_delayed)
+		return;
+
 	for_each_clamp_id(clamp_id)
 		uclamp_rq_dec_id(rq, p, clamp_id);
 }
@@ -1994,8 +2000,12 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
 	}
 
-	uclamp_rq_inc(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
+	/*
+	 * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear
+	 * ->sched_delayed.
+	 */
+	uclamp_rq_inc(rq, p);
 
 	if (sched_core_enabled(rq))
 		sched_core_enqueue(rq, p);
@@ -2017,6 +2027,10 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 		psi_dequeue(p, flags & DEQUEUE_SLEEP);
 	}
 
+	/*
+	 * Must be before ->dequeue_task() because ->dequeue_task() can 'fail'
+	 * and mark the task ->sched_delayed.
+	 */
 	uclamp_rq_dec(rq, p);
 	return p->sched_class->dequeue_task(rq, p, flags);
 }
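
For illustration, a minimal stand-alone userspace model of the fixed
pairing; the names mirror the kernel's, but everything below is a
hypothetical sketch, not kernel code. It shows why skipping delayed
tasks in inc/dec, together with the ordering comments above, keeps the
accounting balanced:

	/* toy_uclamp.c -- illustrative sketch; build with: cc toy_uclamp.c */
	#include <assert.h>
	#include <stdbool.h>

	struct task { bool sched_delayed; };

	static int uclamp_active;	/* stand-in for rq uclamp accounting */

	static void uclamp_rq_inc(struct task *p)
	{
		if (p->sched_delayed)	/* the fix: delayed tasks not counted */
			return;
		uclamp_active++;
	}

	static void uclamp_rq_dec(struct task *p)
	{
		if (p->sched_delayed)	/* the fix: delayed tasks not counted */
			return;
		uclamp_active--;
	}

	/* dec before the class dequeue, which may mark ->sched_delayed */
	static void dequeue_task(struct task *p, bool delays)
	{
		uclamp_rq_dec(p);
		if (delays)
			p->sched_delayed = true;	/* 'failed' dequeue */
	}

	/* inc after the class enqueue, which may clear ->sched_delayed */
	static void enqueue_task(struct task *p, bool delayed_wakeup)
	{
		if (delayed_wakeup)
			p->sched_delayed = false;	/* ENQUEUE_DELAYED case */
		uclamp_rq_inc(p);
	}

	int main(void)
	{
		struct task t = { .sched_delayed = false };

		enqueue_task(&t, false);  /* becomes runnable: count == 1 */
		dequeue_task(&t, true);   /* sleep 'fails': count == 0, delayed */
		dequeue_task(&t, false);  /* migration out: skipped, no underflow */
		enqueue_task(&t, false);  /* migration in: skipped, still delayed */
		enqueue_task(&t, true);   /* real wakeup clears delay: count == 1 */
		assert(uclamp_active == 1);
		return 0;
	}

Without the sched_delayed checks, the second dequeue_task() in main()
would underflow the count -- the double decrement described above.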