sched_ext: Use shorter slice while bypassing
While bypassing, tasks are scheduled in FIFO order which favors tasks that hog CPUs. This can slow down e.g. unloading of the BPF scheduler. While bypassing, guaranteeing timely forward progress is the main goal. There's no point in giving long slices. Shorten the time slice used while bypassing from 20ms to 5ms.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
This commit is contained in:
parent
b7b3b2dbae
commit
6f34d8d382
@ -9,6 +9,7 @@
|
||||
#define SCX_OP_IDX(op) (offsetof(struct sched_ext_ops, op) / sizeof(void (*)(void)))
|
||||
|
||||
enum scx_consts {
|
||||
SCX_SLICE_BYPASS = SCX_SLICE_DFL / 4,
|
||||
SCX_DSP_DFL_MAX_BATCH = 32,
|
||||
SCX_DSP_MAX_LOOPS = 32,
|
||||
SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ,
|
||||
@ -1944,6 +1945,7 @@ static bool scx_rq_online(struct rq *rq)
|
||||
static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
|
||||
int sticky_cpu)
|
||||
{
|
||||
bool bypassing = scx_rq_bypassing(rq);
|
||||
struct task_struct **ddsp_taskp;
|
||||
unsigned long qseq;
|
||||
|
||||
@ -1961,7 +1963,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
|
||||
if (!scx_rq_online(rq))
|
||||
goto local;
|
||||
|
||||
if (scx_rq_bypassing(rq))
|
||||
if (bypassing)
|
||||
goto global;
|
||||
|
||||
if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID)
|
||||
@ -2016,7 +2018,7 @@ local_norefill:
|
||||
|
||||
global:
|
||||
touch_core_sched(rq, p); /* see the comment in local: */
|
||||
p->scx.slice = SCX_SLICE_DFL;
|
||||
p->scx.slice = bypassing ? SCX_SLICE_BYPASS : SCX_SLICE_DFL;
|
||||
dispatch_enqueue(find_global_dsq(p), p, enq_flags);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user