rcu-tasks: Maintain lists to eliminate RCU-tasks/do_exit() deadlocks
This commit continues the elimination of deadlocks involving do_exit()
and RCU tasks by causing exit_tasks_rcu_start() to add the current task
to a per-CPU list and causing exit_tasks_rcu_stop() to remove the
current task from whatever list it is on. These lists will be used to
track tasks that are exiting, while still accounting for any RCU-tasks
quiescent states that these tasks pass through.

[ paulmck: Apply Frederic Weisbecker feedback. ]

Link: https://lore.kernel.org/all/20240118021842.290665-1-chenzhongjin@huawei.com/
Reported-by: Chen Zhongjin <chenzhongjin@huawei.com>
Reported-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Yang Jihong <yangjihong1@huawei.com>
Tested-by: Chen Zhongjin <chenzhongjin@huawei.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
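The diff below is the authoritative implementation. As a quick orientation to
the pattern it introduces, here is a minimal userspace analogue, not kernel
code: each "CPU" keeps a lock-protected list of exiting tasks, exit_start()
records which list the task joined, and exit_stop() removes the task from that
same list. All names here (cpu_bucket, exit_start, exit_stop, NR_BUCKETS) are
hypothetical stand-ins for the kernel's rcu_tasks_percpu machinery; compile
with -pthread.

/* Minimal userspace sketch of the per-CPU exit-list pattern. */
#include <pthread.h>
#include <stdio.h>

struct list_node {
	struct list_node *next, *prev;
};

/* Doubly linked list helpers modeled loosely on the kernel's list.h. */
static void list_init(struct list_node *n) { n->next = n->prev = n; }
static int list_empty(const struct list_node *n) { return n->next == n; }
static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}
static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

#define NR_BUCKETS 4	/* stand-in for the number of CPUs */

/* One "per-CPU" bucket: a lock plus the list of exiting tasks. */
struct cpu_bucket {
	pthread_mutex_t lock;
	struct list_node exit_list;
};

static struct cpu_bucket buckets[NR_BUCKETS];

struct task {
	struct list_node exit_node;	/* like t->rcu_tasks_exit_list */
	int exit_bucket;		/* like t->rcu_tasks_exit_cpu */
};

/* Analogue of exit_tasks_rcu_start(): add the task to one bucket's
 * list and remember which bucket, so exit_stop() can find it again. */
static void exit_start(struct task *t, int bucket)
{
	struct cpu_bucket *b = &buckets[bucket];

	t->exit_bucket = bucket;
	pthread_mutex_lock(&b->lock);
	list_add(&t->exit_node, &b->exit_list);
	pthread_mutex_unlock(&b->lock);
}

/* Analogue of exit_tasks_rcu_stop(): remove the task from whatever
 * list exit_start() put it on. */
static void exit_stop(struct task *t)
{
	struct cpu_bucket *b = &buckets[t->exit_bucket];

	pthread_mutex_lock(&b->lock);
	list_del_init(&t->exit_node);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct task t;
	int i;

	for (i = 0; i < NR_BUCKETS; i++) {
		pthread_mutex_init(&buckets[i].lock, NULL);
		list_init(&buckets[i].exit_list);
	}
	list_init(&t.exit_node);

	exit_start(&t, 1);
	printf("bucket 1 empty after start? %d\n",
	       list_empty(&buckets[1].exit_list));	/* prints 0 */
	exit_stop(&t);
	printf("bucket 1 empty after stop?  %d\n",
	       list_empty(&buckets[1].exit_list));	/* prints 1 */
	return 0;
}

Because the task records the bucket it joined, removal does not depend on the
task still running on the same CPU, which mirrors why the kernel code saves
smp_processor_id() in t->rcu_tasks_exit_cpu before taking the lock.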
parent 46faf9d8e1
commit 6b70399f9e
@@ -1147,25 +1147,48 @@ struct task_struct *get_rcu_tasks_gp_kthread(void)
 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
 
 /*
- * Contribute to protect against tasklist scan blind spot while the
- * task is exiting and may be removed from the tasklist. See
- * corresponding synchronize_srcu() for further details.
+ * Protect against tasklist scan blind spot while the task is exiting and
+ * may be removed from the tasklist. Do this by adding the task to yet
+ * another list.
+ *
+ * Note that the task will remove itself from this list, so there is no
+ * need for get_task_struct(), except in the case where rcu_tasks_pertask()
+ * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
+ * the needed get_task_struct().
  */
-void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
+void exit_tasks_rcu_start(void)
 {
-	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
+	unsigned long flags;
+	struct rcu_tasks_percpu *rtpcp;
+	struct task_struct *t = current;
+
+	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
+	preempt_disable();
+	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
+	t->rcu_tasks_exit_cpu = smp_processor_id();
+	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+	if (!rtpcp->rtp_exit_list.next)
+		INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
+	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
+	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+	preempt_enable();
 }
 
 /*
- * Contribute to protect against tasklist scan blind spot while the
- * task is exiting and may be removed from the tasklist. See
- * corresponding synchronize_srcu() for further details.
+ * Remove the task from the "yet another list" because do_exit() is now
+ * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
  */
-void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
+void exit_tasks_rcu_stop(void)
 {
+	unsigned long flags;
+	struct rcu_tasks_percpu *rtpcp;
 	struct task_struct *t = current;
 
-	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
+	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
+	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
+	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+	list_del_init(&t->rcu_tasks_exit_list);
+	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
 }
 
 /*