block: update cached timestamp post schedule/preemption
Mark the task as having a cached timestamp when we assign it, so we can efficiently check whether it needs updating after being scheduled back in. This covers both the actual schedule-out case, which would've flushed the plug, and the preemption case, which doesn't touch the plugged requests (for many reasons, one of them being that we'd otherwise need preemption disabled around plug state manipulation). Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
da4c8c3d09
commit
06b23f92af
@ -1183,6 +1183,8 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
|
||||
*/
|
||||
if (unlikely(!rq_list_empty(plug->cached_rq)))
|
||||
blk_mq_free_plug_rqs(plug);
|
||||
|
||||
current->flags &= ~PF_BLOCK_TS;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -529,8 +529,10 @@ static inline u64 blk_time_get_ns(void)
|
||||
* a valid timestamp" separately, just accept that we'll do an extra
|
||||
* ktime_get_ns() if we just happen to get 0 as the current time.
|
||||
*/
|
||||
if (!plug->cur_ktime)
|
||||
if (!plug->cur_ktime) {
|
||||
plug->cur_ktime = ktime_get_ns();
|
||||
current->flags |= PF_BLOCK_TS;
|
||||
}
|
||||
return plug->cur_ktime;
|
||||
}
|
||||
|
||||
|
@ -973,6 +973,18 @@ static inline void blk_flush_plug(struct blk_plug *plug, bool async)
|
||||
__blk_flush_plug(plug, async);
|
||||
}
|
||||
|
||||
/*
|
||||
* tsk == current here
|
||||
*/
|
||||
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
|
||||
{
|
||||
struct blk_plug *plug = tsk->plug;
|
||||
|
||||
if (plug)
|
||||
plug->cur_ktime = 0;
|
||||
current->flags &= ~PF_BLOCK_TS;
|
||||
}
|
||||
|
||||
int blkdev_issue_flush(struct block_device *bdev);
|
||||
long nr_blockdev_pages(void);
|
||||
#else /* CONFIG_BLOCK */
|
||||
@ -996,6 +1008,10 @@ static inline void blk_flush_plug(struct blk_plug *plug, bool async)
|
||||
{
|
||||
}
|
||||
|
||||
/* !CONFIG_BLOCK stub: no plug exists, so there is no timestamp to drop. */
static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
}
|
||||
|
||||
static inline int blkdev_issue_flush(struct block_device *bdev)
|
||||
{
|
||||
return 0;
|
||||
|
@ -1642,7 +1642,7 @@ extern struct pid *cad_pid;
|
||||
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
|
||||
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
|
||||
#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
|
||||
#define PF__HOLE__20000000 0x20000000
|
||||
#define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
|
||||
#define PF__HOLE__40000000 0x40000000
|
||||
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
|
||||
|
||||
|
@ -6787,10 +6787,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
|
||||
|
||||
static void sched_update_worker(struct task_struct *tsk)
|
||||
{
|
||||
if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
|
||||
if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER | PF_BLOCK_TS)) {
|
||||
if (tsk->flags & PF_BLOCK_TS)
|
||||
blk_plug_invalidate_ts(tsk);
|
||||
if (tsk->flags & PF_WQ_WORKER)
|
||||
wq_worker_running(tsk);
|
||||
else
|
||||
else if (tsk->flags & PF_IO_WORKER)
|
||||
io_wq_worker_running(tsk);
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user