introduce for_other_threads(p, t)

Cosmetic, but imho it makes the usage look clearer and simpler: the new
helper doesn't require the caller to initialize "t".

After this change while_each_thread() has only 3 users, and it is only
used in do/while loops.

Link: https://lkml.kernel.org/r/20231030155710.GA9095@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
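To illustrate the cleanup, here is the calling pattern before and after, condensed from the check_unsafe_exec() hunk below (a sketch only; p, t and n_fs are the locals of that function):

	/* before: the caller had to prime "t" with the group leader */
	t = p;
	while_each_thread(p, t) {
		if (t->fs == p->fs)
			n_fs++;
	}

	/* after: for_other_threads() initializes "t" itself */
	for_other_threads(p, t) {
		if (t->fs == p->fs)
			n_fs++;
	}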
commit 61a7a5e25f
parent a9a1d6ad66
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1578,11 +1578,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 	 * will be able to manipulate the current directory, etc.
 	 * It would be nice to force an unshare instead...
 	 */
-	t = p;
 	n_fs = 1;
 	spin_lock(&p->fs->lock);
 	rcu_read_lock();
-	while_each_thread(p, t) {
+	for_other_threads(p, t) {
 		if (t->fs == p->fs)
 			n_fs++;
 	}
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -646,6 +646,9 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
+#define for_other_threads(p, t) \
+	for (t = p; (t = next_thread(t)) != p; )
+
 #define __for_each_thread(signal, t) \
 	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
 				lockdep_is_held(&tasklist_lock))
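As a sanity check of the loop semantics, here is a minimal userspace model of the new macro. struct task, next_thread() and main() are made-up stand-ins for the kernel's circular thread-group list; only the macro body is taken from the hunk above:

	#include <stdio.h>

	/* toy stand-in for task_struct on a circular thread-group list */
	struct task {
		int pid;
		struct task *next;
	};

	static struct task *next_thread(struct task *t)
	{
		return t->next;
	}

	/* copied from the patch: visits every thread except p itself */
	#define for_other_threads(p, t) \
		for (t = p; (t = next_thread(t)) != p; )

	int main(void)
	{
		struct task a = { .pid = 1 }, b = { .pid = 2 }, c = { .pid = 3 };
		struct task *t;

		a.next = &b; b.next = &c; c.next = &a;	/* circular group of 3 */

		for_other_threads(&a, t)
			printf("pid %d\n", t->pid);	/* prints 2 then 3, never 1 */

		return 0;
	}

Because the for-loop both initializes and advances "t", no separate "t = p;" statement is needed at the call sites, which is exactly what the fs/exec.c and kernel/signal.c hunks delete.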
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1376,12 +1376,12 @@ int force_sig_info(struct kernel_siginfo *info)
  */
 int zap_other_threads(struct task_struct *p)
 {
-	struct task_struct *t = p;
+	struct task_struct *t;
 	int count = 0;
 
 	p->signal->group_stop_count = 0;
 
-	while_each_thread(p, t) {
+	for_other_threads(p, t) {
 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
 		/* Don't require de_thread to wait for the vhost_worker */
 		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
@@ -2465,12 +2465,10 @@ static bool do_signal_stop(int signr)
 			sig->group_exit_code = signr;
 
 		sig->group_stop_count = 0;
-
 		if (task_set_jobctl_pending(current, signr | gstop))
 			sig->group_stop_count++;
 
-		t = current;
-		while_each_thread(current, t) {
+		for_other_threads(current, t) {
 			/*
 			 * Setting state to TASK_STOPPED for a group
 			 * stop is always done with the siglock held,
@@ -2966,8 +2964,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
 	if (sigisemptyset(&retarget))
 		return;
 
-	t = tsk;
-	while_each_thread(tsk, t) {
+	for_other_threads(tsk, t) {
 		if (t->flags & PF_EXITING)
 			continue;
 