workqueue: Changes for v6.12
Merge tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Nothing major:

   - workqueue.panic_on_stall boot param added

   - alloc_workqueue_lockdep_map() added (used by DRM)

   - Other cleanups and doc updates"

* tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  kernel/workqueue.c: fix DEFINE_PER_CPU_SHARED_ALIGNED expansion
  workqueue: Fix another htmldocs build warning
  workqueue: fix null-ptr-deref on __alloc_workqueue() error
  workqueue: Don't call va_start / va_end twice
  workqueue: Fix htmldocs build warning
  workqueue: Add interface for user-defined workqueue lockdep map
  workqueue: Change workqueue lockdep map to pointer
  workqueue: Split alloc_workqueue into internal function and lockdep init
  Documentation: kernel-parameters: add workqueue.panic_on_stall
  workqueue: add cmdline parameter workqueue.panic_on_stall
commit 85a77db95a
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -7386,6 +7386,13 @@
 			it can be updated at runtime by writing to the
 			corresponding sysfs file.
 
+	workqueue.panic_on_stall=<uint>
+			Panic when a workqueue stall is detected by
+			CONFIG_WQ_WATCHDOG. The value sets the number of
+			stall detections required to trigger the panic.
+
+			The default is 0, which disables panic on stall.
+
 	workqueue.cpu_intensive_thresh_us=
 			Per-cpu work items which run for longer than this
 			threshold are automatically considered CPU intensive
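A usage sketch for the new parameter (hedged: the sysfs path below assumes the usual layout for built-in module parameters and is not part of the patch). Because the parameter is declared with mode 0644, it can be set at boot and adjusted at runtime; with a value of 3, the watchdog panics the system on the third stall it detects:

	workqueue.panic_on_stall=3

	# equivalent runtime knob (assumed path)
	echo 3 > /sys/module/workqueue/parameters/panic_on_stall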
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
 __printf(1, 4) struct workqueue_struct *
 alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
 
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+			    struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
+	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
+				    1, lockdep_map, ##args)
+#endif
+
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
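A minimal sketch of the intended use, assuming CONFIG_LOCKDEP (the DRM scheduler is the in-tree caller; all names below are illustrative, not from the patch). A subsystem that repeatedly creates workqueues for the same purpose registers one static lockdep_map and hands it to every queue, instead of leaking a fresh lockdep key per allocation:

	#include <linux/workqueue.h>
	#include <linux/lockdep.h>

	/* One map shared by every queue created for this purpose. */
	static struct lockdep_map example_submit_wq_map = {
		.name = "example_submit_wq",
	};

	static struct workqueue_struct *example_alloc_submit_wq(int id)
	{
		/* Ordered queue; all instances share one lockdep identity. */
		return alloc_ordered_workqueue_lockdep_map("example_submit/%d",
							   WQ_MEM_RECLAIM,
							   &example_submit_wq_map,
							   id);
	}

With this, lockdep still reports deadlocks involving any of these queues, but the queues no longer consume a lock class apiece.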
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -364,7 +364,8 @@ struct workqueue_struct {
 #ifdef CONFIG_LOCKDEP
 	char			*lock_name;
 	struct lock_class_key	key;
-	struct lockdep_map	lockdep_map;
+	struct lockdep_map	__lockdep_map;
+	struct lockdep_map	*lockdep_map;
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 
@@ -476,16 +477,13 @@ static bool wq_debug_force_rr_cpu = false;
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* to raise softirq for the BH worker pools on other CPUs */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
-				     bh_pool_irq_works);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);
 
 /* the BH worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     bh_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);
 
 /* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 
 static DEFINE_IDR(worker_pool_idr);	/* PR: idr of all pools */
 
@@ -3203,7 +3201,7 @@ __acquires(&pool->lock)
 	lockdep_start_depth = lockdep_depth(current);
 	/* see drain_dead_softirq_workfn() */
 	if (!bh_draining)
-		lock_map_acquire(&pwq->wq->lockdep_map);
+		lock_map_acquire(pwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	/*
 	 * Strictly speaking we should mark the invariant state without holding
@@ -3237,7 +3235,7 @@ __acquires(&pool->lock)
 	pwq->stats[PWQ_STAT_COMPLETED]++;
 	lock_map_release(&lockdep_map);
 	if (!bh_draining)
-		lock_map_release(&pwq->wq->lockdep_map);
+		lock_map_release(pwq->wq->lockdep_map);
 
 	if (unlikely((worker->task && in_atomic()) ||
 		     lockdep_depth(current) != lockdep_start_depth ||
@@ -3873,11 +3871,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 static void touch_wq_lockdep_map(struct workqueue_struct *wq)
 {
 #ifdef CONFIG_LOCKDEP
+	if (unlikely(!wq->lockdep_map))
+		return;
+
 	if (wq->flags & WQ_BH)
 		local_bh_disable();
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
+	lock_map_acquire(wq->lockdep_map);
+	lock_map_release(wq->lockdep_map);
 
 	if (wq->flags & WQ_BH)
 		local_bh_enable();
@@ -3911,7 +3912,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)),
 	};
 	int next_color;
 
@@ -4776,16 +4777,23 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
 		lock_name = wq->name;
 
 	wq->lock_name = lock_name;
-	lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+	wq->lockdep_map = &wq->__lockdep_map;
+	lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
 }
 
 static void wq_unregister_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
 	lockdep_unregister_key(&wq->key);
 }
 
 static void wq_free_lockdep(struct workqueue_struct *wq)
 {
+	if (wq->lockdep_map != &wq->__lockdep_map)
+		return;
+
 	if (wq->lock_name != wq->name)
 		kfree(wq->lock_name);
 }
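The invariant these early returns encode: wq->lockdep_map points either at the embedded __lockdep_map, in which case the workqueue owns the lockdep key and the name string and must release them at teardown, or at a caller-supplied map, which teardown must leave untouched. A hypothetical helper (not in the patch, shown only to restate the condition) would read:

	/* Illustrative only: true when the workqueue owns its lockdep
	 * state and teardown must unregister/free it. */
	static bool wq_owns_lockdep_map(struct workqueue_struct *wq)
	{
		return wq->lockdep_map == &wq->__lockdep_map;
	}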
@@ -5619,12 +5627,10 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
 	} while (activated);
 }
 
-__printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-					 unsigned int flags,
-					 int max_active, ...)
+static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+						  unsigned int flags,
+						  int max_active, va_list args)
 {
-	va_list args;
 	struct workqueue_struct *wq;
 	size_t wq_size;
 	int name_len;
@@ -5656,9 +5662,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 		goto err_free_wq;
 	}
 
-	va_start(args, max_active);
 	name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
-	va_end(args);
 
 	if (name_len >= WQ_NAME_LEN)
 		pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
|
|||||||
INIT_LIST_HEAD(&wq->flusher_overflow);
|
INIT_LIST_HEAD(&wq->flusher_overflow);
|
||||||
INIT_LIST_HEAD(&wq->maydays);
|
INIT_LIST_HEAD(&wq->maydays);
|
||||||
|
|
||||||
wq_init_lockdep(wq);
|
|
||||||
INIT_LIST_HEAD(&wq->list);
|
INIT_LIST_HEAD(&wq->list);
|
||||||
|
|
||||||
if (flags & WQ_UNBOUND) {
|
if (flags & WQ_UNBOUND) {
|
||||||
if (alloc_node_nr_active(wq->node_nr_active) < 0)
|
if (alloc_node_nr_active(wq->node_nr_active) < 0)
|
||||||
goto err_unreg_lockdep;
|
goto err_free_wq;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -5732,9 +5735,6 @@ err_unlock_free_node_nr_active:
 		kthread_flush_worker(pwq_release_worker);
 		free_node_nr_active(wq->node_nr_active);
 	}
-err_unreg_lockdep:
-	wq_unregister_lockdep(wq);
-	wq_free_lockdep(wq);
 err_free_wq:
 	free_workqueue_attrs(wq->unbound_attrs);
 	kfree(wq);
@@ -5745,8 +5745,49 @@ err_destroy:
 	destroy_workqueue(wq);
 	return NULL;
 }
+
+__printf(1, 4)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+					 unsigned int flags,
+					 int max_active, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, max_active);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq_init_lockdep(wq);
+
+	return wq;
+}
 EXPORT_SYMBOL_GPL(alloc_workqueue);
+
+#ifdef CONFIG_LOCKDEP
+__printf(1, 5)
+struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
+			    int max_active, struct lockdep_map *lockdep_map, ...)
+{
+	struct workqueue_struct *wq;
+	va_list args;
+
+	va_start(args, lockdep_map);
+	wq = __alloc_workqueue(fmt, flags, max_active, args);
+	va_end(args);
+	if (!wq)
+		return NULL;
+
+	wq->lockdep_map = lockdep_map;
+
+	return wq;
+}
+EXPORT_SYMBOL_GPL(alloc_workqueue_lockdep_map);
+#endif
 
 static bool pwq_busy(struct pool_workqueue *pwq)
 {
 	int i;
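The split follows the standard v-variant convention (compare printf/vprintf): the public variadic wrappers own the va_start/va_end pairing exactly once, and the internal __alloc_workqueue() only consumes an already-started va_list, which is what the "Don't call va_start / va_end twice" fix restores. A minimal userspace sketch of the same pattern (illustrative, not kernel code):

	#include <stdarg.h>
	#include <stdio.h>

	/* Internal v-variant: consumes an already-started va_list once. */
	static int log_v(const char *fmt, va_list args)
	{
		return vprintf(fmt, args);
	}

	/* Public variadic wrapper: the only place va_start/va_end appear. */
	static int log_fmt(const char *fmt, ...)
	{
		va_list args;
		int ret;

		va_start(args, fmt);
		ret = log_v(fmt, args);
		va_end(args);
		return ret;
	}

	int main(void)
	{
		return log_fmt("max_active=%d\n", 16) < 0;
	}

Deferring wq_init_lockdep() until __alloc_workqueue() has succeeded also removes the err_unreg_lockdep unwind path, which is how the null-ptr-deref on the error path was fixed.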
@@ -7406,6 +7447,9 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
 /*
  * Show workers that might prevent the processing of pending work items.
  * The only candidates are CPU-bound workers in the running state.
@@ -7457,6 +7501,16 @@ static void show_cpu_pools_hogs(void)
 	rcu_read_unlock();
 }
 
+static void panic_on_wq_watchdog(void)
+{
+	static unsigned int wq_stall;
+
+	if (wq_panic_on_stall) {
+		wq_stall++;
+		BUG_ON(wq_stall >= wq_panic_on_stall);
+	}
+}
+
 static void wq_watchdog_reset_touched(void)
 {
 	int cpu;
@@ -7529,6 +7583,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 	if (cpu_pool_stall)
 		show_cpu_pools_hogs();
 
+	if (lockup_detected)
+		panic_on_wq_watchdog();
+
 	wq_watchdog_reset_touched();
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }