// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Oracle Corporation
 */
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/sched/task.h>
#include <linux/sched/vhost_task.h>
#include <linux/sched/signal.h>

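/*
 * A vhost_task is a worker thread cloned from the process that owns the
 * vhost device. It shares that process's mm and signal handlers, so its
 * CPU time is accounted to the owner and it is killed along with it.
 */
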
enum vhost_task_flags {
	VHOST_TASK_FLAGS_STOP,
	VHOST_TASK_FLAGS_KILLED,
};

struct vhost_task {
	bool (*fn)(void *data);
	void (*handle_sigkill)(void *data);
	void *data;
	struct completion exited;
	unsigned long flags;
	struct task_struct *task;
	/* serialize SIGKILL and vhost_task_stop calls */
	struct mutex exit_mutex;
};

static int vhost_task_fn(void *data)
{
	struct vhost_task *vtsk = data;

	for (;;) {
		bool did_work;

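		/*
		 * get_signal() handles a stop signal itself and only returns
		 * a signal here when the worker has been sent a fatal signal
		 * (SIGKILL), so treat that as a request to leave the work
		 * loop and exit.
		 */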
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (get_signal(&ksig))
				break;
		}

		/* mb paired w/ vhost_task_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
			__set_current_state(TASK_RUNNING);
			break;
		}

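		/*
		 * Run one pass of work. If no work was done, sleep until
		 * vhost_task_wake() or a signal wakes us up.
		 */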
		did_work = vtsk->fn(vtsk->data);
		if (!did_work)
			schedule();
	}

	mutex_lock(&vtsk->exit_mutex);
	/*
	 * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
	 * When the vhost layer has called vhost_task_stop it's already stopped
	 * new work and flushed.
	 */
	if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
		set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
		vtsk->handle_sigkill(vtsk->data);
	}
	mutex_unlock(&vtsk->exit_mutex);
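	/*
	 * Once exited is completed, vhost_task_stop() can return and free
	 * the vhost_task, so vtsk must not be touched after this point.
	 */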
	complete(&vtsk->exited);

	do_exit(0);
}

/**
 * vhost_task_wake - wakeup the vhost_task
 * @vtsk: vhost_task to wake
 *
 * wake up the vhost_task worker thread
 */
void vhost_task_wake(struct vhost_task *vtsk)
{
	wake_up_process(vtsk->task);
}
EXPORT_SYMBOL_GPL(vhost_task_wake);

/**
 * vhost_task_stop - stop a vhost_task
 * @vtsk: vhost_task to stop
 *
 * vhost_task_fn ensures the worker thread exits after
 * VHOST_TASK_FLAGS_STOP becomes true. The vhost_task is freed before this
 * returns, so it must not be used afterwards.
 */
void vhost_task_stop(struct vhost_task *vtsk)
{
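	/*
	 * If the worker was already killed, it has broken out of its work
	 * loop and is exiting on its own, so there is nothing to stop or
	 * wake; just wait for it to finish below.
	 */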
	mutex_lock(&vtsk->exit_mutex);
	if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
		set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
		vhost_task_wake(vtsk);
	}
	mutex_unlock(&vtsk->exit_mutex);

	/*
	 * Make sure vhost_task_fn is no longer accessing the vhost_task before
	 * freeing it below.
	 */
	wait_for_completion(&vtsk->exited);
	kfree(vtsk);
}
EXPORT_SYMBOL_GPL(vhost_task_stop);

/**
 * vhost_task_create - create a copy of a task to be used by the kernel
 * @fn: vhost worker function
 * @handle_sigkill: vhost function to handle when we are killed
 * @arg: data to be passed to fn and handle_sigkill
 * @name: the thread's name
 *
 * This returns a specialized task for use by the vhost layer or NULL on
 * failure. The returned task is inactive, and the caller must fire it up
 * through vhost_task_start().
 */
struct vhost_task *vhost_task_create(bool (*fn)(void *),
				     void (*handle_sigkill)(void *), void *arg,
				     const char *name)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_FS | CLONE_UNTRACED | CLONE_VM |
				  CLONE_THREAD | CLONE_SIGHAND,
		.exit_signal	= 0,
		.fn		= vhost_task_fn,
		.name		= name,
		.user_worker	= 1,
		.no_files	= 1,
	};
	struct vhost_task *vtsk;
	struct task_struct *tsk;

	vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL);
	if (!vtsk)
		return NULL;
	init_completion(&vtsk->exited);
	mutex_init(&vtsk->exit_mutex);
	vtsk->data = arg;
	vtsk->fn = fn;
	vtsk->handle_sigkill = handle_sigkill;

	args.fn_arg = vtsk;

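	/*
	 * The new task is cloned from the current process and shares its mm
	 * and signal handlers, but it is not runnable yet; vhost_task_start()
	 * wakes it with wake_up_new_task().
	 */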
	tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args);
	if (IS_ERR(tsk)) {
		kfree(vtsk);
		return NULL;
	}

	vtsk->task = tsk;
	return vtsk;
}
EXPORT_SYMBOL_GPL(vhost_task_create);

/**
 * vhost_task_start - start a vhost_task created with vhost_task_create
 * @vtsk: vhost_task to wake up
 */
void vhost_task_start(struct vhost_task *vtsk)
{
	wake_up_new_task(vtsk->task);
}
EXPORT_SYMBOL_GPL(vhost_task_start);
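
/*
 * Example usage (illustrative sketch only): a hypothetical caller that owns
 * a single worker. The "my_dev" structure and helpers below are invented
 * for the example; only the vhost_task_*() calls are the ones defined in
 * this file.
 *
 *	struct my_dev {
 *		struct vhost_task *vtsk;
 *		struct llist_head work_list;
 *	};
 *
 *	static bool my_dev_work(void *data)
 *	{
 *		struct my_dev *d = data;
 *		struct llist_node *node = llist_del_all(&d->work_list);
 *
 *		// ... process the entries chained off node ...
 *		return node != NULL;	// true means run another pass
 *	}
 *
 *	static void my_dev_killed(void *data)
 *	{
 *		// Fail or flush outstanding work; no new work will be queued.
 *	}
 *
 *	static int my_dev_open(struct my_dev *d)
 *	{
 *		d->vtsk = vhost_task_create(my_dev_work, my_dev_killed, d,
 *					    "my-dev-worker");
 *		if (!d->vtsk)
 *			return -ENOMEM;
 *		vhost_task_start(d->vtsk);
 *		return 0;
 *	}
 *
 * After queueing an item on d->work_list, call vhost_task_wake(d->vtsk);
 * on release, after flushing, call vhost_task_stop(d->vtsk), which also
 * frees the vhost_task.
 */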