async: Use a dedicated unbound workqueue with raised min_active
Async can schedule a number of interdependent work items. However, since
commit 5797b1c189 ("workqueue: Implement system-wide nr_active enforcement
for unbound workqueues"), unbound workqueues have a separate min_active,
which caps the number of interdependent work items that can be handled
concurrently. The default value is 8, which isn't sufficient for async and
can lead to stalls during resume from suspend in some cases.

Let's use a dedicated unbound workqueue with raised min_active.
Link: http://lkml.kernel.org/r/708a65cc-79ec-44a6-8454-a93d0f3114c3@samsung.com
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent: 8f172181f2
commit: bf52b1ac6a
@@ -120,4 +120,5 @@ extern void async_synchronize_cookie(async_cookie_t cookie);
 extern void async_synchronize_cookie_domain(async_cookie_t cookie,
 					    struct async_domain *domain);
 extern bool current_is_async(void);
+extern void async_init(void);
 #endif
@@ -1545,6 +1545,7 @@ static noinline void __init kernel_init_freeable(void)
 	sched_init_smp();
 
 	workqueue_init_topology();
+	async_init();
 	padata_init();
 	page_alloc_init_late();
 
@@ -64,6 +64,7 @@ static async_cookie_t next_cookie = 1;
 static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
 static ASYNC_DOMAIN(async_dfl_domain);
 static DEFINE_SPINLOCK(async_lock);
+static struct workqueue_struct *async_wq;
 
 struct async_entry {
 	struct list_head domain_list;
@@ -174,7 +175,7 @@ static async_cookie_t __async_schedule_node_domain(async_func_t func,
 	spin_unlock_irqrestore(&async_lock, flags);
 
 	/* schedule for execution */
-	queue_work_node(node, system_unbound_wq, &entry->work);
+	queue_work_node(node, async_wq, &entry->work);
 
 	return newcookie;
 }
@@ -345,3 +346,17 @@ bool current_is_async(void)
 	return worker && worker->current_func == async_run_entry_fn;
 }
 EXPORT_SYMBOL_GPL(current_is_async);
+
/*
 * async_init - set up the dedicated workqueue backing the async machinery.
 *
 * Called once during boot, from kernel_init_freeable() right after
 * workqueue_init_topology().
 *
 * Async can schedule a number of interdependent work items. However,
 * unbound workqueues can handle only up to min_active interdependent
 * work items. The default min_active of 8 isn't sufficient for async
 * and can lead to stalls. Use a dedicated workqueue with raised
 * min_active.
 */
void __init async_init(void)
{
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);	/* boot cannot proceed without it */
	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}
Loading…
Reference in New Issue
Block a user