workqueue: Fixes for v6.11-rc4

Nothing too interesting. One patch to remove a spurious warning and others
 to address static checker warnings.
 -----BEGIN PGP SIGNATURE-----
 
 iIQEABYKACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCZskq8g4cdGpAa2VybmVs
 Lm9yZwAKCRCxYfJx3gVYGfTVAP42MsAOyrlND+cH/zQpSc8OhGbm3v0gJFnPn4UE
 Y3B4kgD/W68n57MQ5uWh1vHHvsqjizbXfRez1dVJoGqa/q88GQs=
 =Uwdx
 -----END PGP SIGNATURE-----

Merge tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Nothing too interesting. One patch to remove spurious warning and
  others to address static checker warnings"

* tag 'wq-for-6.11-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Correct declaration of cpu_pwq in struct workqueue_struct
  workqueue: Fix spurious data race in __flush_work()
  workqueue: Remove incorrect "WARN_ON_ONCE(!list_empty(&worker->entry));" from dying worker
  workqueue: Fix UBSAN 'subtraction overflow' error in shift_and_mask()
  workqueue: doc: Fix function name, remove markers
commit cb2c84b380
Author: Linus Torvalds
Date:   2024-08-24 10:35:57 +08:00

2 changed files with 28 additions and 24 deletions

Documentation/core-api/workqueue.rst

@@ -260,7 +260,7 @@ Some users depend on strict execution ordering where only one work item
 is in flight at any given time and the work items are processed in
 queueing order. While the combination of ``@max_active`` of 1 and
 ``WQ_UNBOUND`` used to achieve this behavior, this is no longer the
-case. Use ``alloc_ordered_queue()`` instead.
+case. Use alloc_ordered_workqueue() instead.
 
 
 Example Execution Scenarios
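
For context on the doc fix: ordered behavior now requires asking for an ordered workqueue explicitly. A minimal module-style sketch of that usage (my_wq, my_work_fn, my_ordered_wq and the module boilerplate are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	pr_info("items on an ordered wq run one at a time, in queueing order\n");
}
static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	/*
	 * Ordered queue: at most one item in flight, strict queueing order.
	 * max_active == 1 plus WQ_UNBOUND no longer guarantees this.
	 */
	my_wq = alloc_ordered_workqueue("my_ordered_wq", 0);
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);	/* pending work is completed first */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");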

kernel/workqueue.c

@@ -377,7 +377,7 @@ struct workqueue_struct {
 
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
-	struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+	struct pool_workqueue __rcu * __percpu *cpu_pwq; /* I: per-cpu pwqs */
 	struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
 };
 
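The cpu_pwq change only moves the sparse annotations to the right pointer levels: the field is a per-CPU slot (__percpu on the outer pointer) holding an RCU-protected pointer (__rcu on the inner one). That matches how the field is dereferenced elsewhere in workqueue.c, roughly:

	struct pool_workqueue *pwq;

	/* resolve the per-CPU slot first, then the RCU-protected pointer */
	pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
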
@@ -897,7 +897,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
 
 static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits)
 {
-	return (v >> shift) & ((1 << bits) - 1);
+	return (v >> shift) & ((1U << bits) - 1);
 }
 
 static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data)
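
On the 1U fix: bits can reach 31 here, and with a signed 1 the shifted value lands on INT_MIN, so the following subtraction overflows; that is the 'subtraction overflow' UBSAN reports. A standalone sketch of the difference (hypothetical userspace demo, not from the patch; build with -fsanitize=undefined to see the report):

#include <stdio.h>

int main(void)
{
	unsigned int bits = 31;	/* the pool-ID field can be this wide */

	/*
	 * Signed: 1 << 31 produces INT_MIN in practice, and the following
	 * "- 1" is the signed-integer overflow UBSAN flags. Left commented
	 * out so the demo itself is clean.
	 */
	/* int bad = (1 << bits) - 1; */

	/* Unsigned: wraps modulo 2^32, well defined: 0x80000000 - 1. */
	unsigned int mask = (1U << bits) - 1;

	printf("mask = %#x\n", mask);	/* 0x7fffffff */
	return 0;
}
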
@@ -3351,7 +3351,6 @@ woke_up:
 		set_pf_worker(false);
 
 		ida_free(&pool->worker_ida, worker->id);
-		WARN_ON_ONCE(!list_empty(&worker->entry));
 		return 0;
 	}
 
@@ -4167,7 +4166,6 @@ already_gone:
 static bool __flush_work(struct work_struct *work, bool from_cancel)
 {
 	struct wq_barrier barr;
-	unsigned long data;
 
 	if (WARN_ON(!wq_online))
 		return false;
@@ -4185,29 +4183,35 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	 * was queued on a BH workqueue, we also know that it was running in the
 	 * BH context and thus can be busy-waited.
 	 */
-	data = *work_data_bits(work);
-	if (from_cancel &&
-	    !WARN_ON_ONCE(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_BH)) {
-		/*
-		 * On RT, prevent a live lock when %current preempted soft
-		 * interrupt processing or prevents ksoftirqd from running by
-		 * keeping flipping BH. If the BH work item runs on a different
-		 * CPU then this has no effect other than doing the BH
-		 * disable/enable dance for nothing. This is copied from
-		 * kernel/softirq.c::tasklet_unlock_spin_wait().
-		 */
-		while (!try_wait_for_completion(&barr.done)) {
-			if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-				local_bh_disable();
-				local_bh_enable();
-			} else {
-				cpu_relax();
+	if (from_cancel) {
+		unsigned long data = *work_data_bits(work);
+
+		if (!WARN_ON_ONCE(data & WORK_STRUCT_PWQ) &&
+		    (data & WORK_OFFQ_BH)) {
+			/*
+			 * On RT, prevent a live lock when %current preempted
+			 * soft interrupt processing or prevents ksoftirqd from
+			 * running by keeping flipping BH. If the BH work item
+			 * runs on a different CPU then this has no effect other
+			 * than doing the BH disable/enable dance for nothing.
+			 * This is copied from
+			 * kernel/softirq.c::tasklet_unlock_spin_wait().
+			 */
+			while (!try_wait_for_completion(&barr.done)) {
+				if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+					local_bh_disable();
+					local_bh_enable();
+				} else {
+					cpu_relax();
+				}
 			}
+			goto out_destroy;
 		}
-	} else {
-		wait_for_completion(&barr.done);
 	}
 
+	wait_for_completion(&barr.done);
+
+out_destroy:
 	destroy_work_on_stack(&barr.work);
 	return true;
 }
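
A note on this last hunk, going by the commit's intent: the old code loaded *work_data_bits(work) unconditionally, including on the !from_cancel path where the work item may still be live and its data word rewritten concurrently, which is what KCSAN flagged. A canceling caller holds the item stable, so the load (and the WARN_ON_ONCE on it) moves inside the from_cancel branch, with a goto sharing the destroy_work_on_stack() exit. A hypothetical userspace analogue of the shape of the fix (struct thing, THING_FLAG and flush_thing are made-up names):

#include <stdbool.h>

struct thing {
	unsigned long data;	/* rewritten concurrently by other threads */
};

#define THING_FLAG 0x1UL

static bool flush_thing(struct thing *t, bool from_cancel)
{
	if (from_cancel) {
		/*
		 * A plain load is race-free here only because a canceling
		 * caller holds the item stable. Doing this load before the
		 * from_cancel check, as the old code did, races with
		 * concurrent writers; that is what KCSAN reported.
		 */
		unsigned long data = t->data;

		if (data & THING_FLAG)
			return true;	/* take the busy-wait path */
	}

	return false;	/* common path never touches t->data */
}

The point is structural: confine the unsynchronized read to the one path that can justify it.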