// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Oracle Corporation
 */
# include <linux/slab.h>
# include <linux/completion.h>
# include <linux/sched/task.h>
# include <linux/sched/vhost_task.h>
# include <linux/sched/signal.h>
/* Bit numbers for vhost_task::flags (used with set_bit()/test_bit()). */
enum vhost_task_flags {
	/* Set by vhost_task_stop() to ask the worker thread to exit. */
	VHOST_TASK_FLAGS_STOP,
};
/*
 * A kernel worker task dedicated to the vhost layer.  Created by
 * vhost_task_create(), started by vhost_task_start(), and torn down
 * (including freeing this struct) by vhost_task_stop().
 */
struct vhost_task {
	/* Worker callback; returns true if it did work (see vhost_task_fn). */
	bool			(*fn)(void *data);
	/* Opaque argument passed to @fn. */
	void			*data;
	/* Completed by the worker just before it exits. */
	struct completion	exited;
	/* VHOST_TASK_FLAGS_* bits. */
	unsigned long		flags;
	/* The underlying kernel thread backing this worker. */
	struct task_struct	*task;
};
static int vhost_task_fn ( void * data )
{
struct vhost_task * vtsk = data ;
2023-06-01 21:32:32 +03:00
bool dead = false ;
for ( ; ; ) {
bool did_work ;
if ( ! dead & & signal_pending ( current ) ) {
struct ksignal ksig ;
/*
* Calling get_signal will block in SIGSTOP ,
* or clear fatal_signal_pending , but remember
* what was set .
*
* This thread won ' t actually exit until all
* of the file descriptors are closed , and
* the release function is called .
*/
dead = get_signal ( & ksig ) ;
if ( dead )
clear_thread_flag ( TIF_SIGPENDING ) ;
}
2023-06-07 22:23:38 +03:00
/* mb paired w/ vhost_task_stop */
set_current_state ( TASK_INTERRUPTIBLE ) ;
if ( test_bit ( VHOST_TASK_FLAGS_STOP , & vtsk - > flags ) ) {
__set_current_state ( TASK_RUNNING ) ;
break ;
}
2023-06-01 21:32:32 +03:00
did_work = vtsk - > fn ( vtsk - > data ) ;
2023-06-07 22:23:38 +03:00
if ( ! did_work )
2023-06-01 21:32:32 +03:00
schedule ( ) ;
}
2023-03-11 01:03:30 +03:00
complete ( & vtsk - > exited ) ;
2023-06-01 21:32:32 +03:00
do_exit ( 0 ) ;
}
/**
* vhost_task_wake - wakeup the vhost_task
* @ vtsk : vhost_task to wake
*
* wake up the vhost_task worker thread
*/
void vhost_task_wake ( struct vhost_task * vtsk )
{
wake_up_process ( vtsk - > task ) ;
2023-03-11 01:03:30 +03:00
}
2023-06-01 21:32:32 +03:00
EXPORT_SYMBOL_GPL ( vhost_task_wake ) ;
2023-03-11 01:03:30 +03:00
/**
* vhost_task_stop - stop a vhost_task
* @ vtsk : vhost_task to stop
*
2023-06-01 21:32:32 +03:00
* vhost_task_fn ensures the worker thread exits after
* VHOST_TASK_FLAGS_SOP becomes true .
2023-03-11 01:03:30 +03:00
*/
void vhost_task_stop ( struct vhost_task * vtsk )
{
set_bit ( VHOST_TASK_FLAGS_STOP , & vtsk - > flags ) ;
2023-06-01 21:32:32 +03:00
vhost_task_wake ( vtsk ) ;
2023-03-11 01:03:30 +03:00
/*
* Make sure vhost_task_fn is no longer accessing the vhost_task before
2023-06-01 21:32:32 +03:00
* freeing it below .
2023-03-11 01:03:30 +03:00
*/
wait_for_completion ( & vtsk - > exited ) ;
kfree ( vtsk ) ;
}
EXPORT_SYMBOL_GPL ( vhost_task_stop ) ;
/**
2023-06-01 21:32:32 +03:00
* vhost_task_create - create a copy of a task to be used by the kernel
* @ fn : vhost worker function
2023-03-11 01:03:30 +03:00
* @ arg : data to be passed to fn
* @ name : the thread ' s name
*
* This returns a specialized task for use by the vhost layer or NULL on
* failure . The returned task is inactive , and the caller must fire it up
* through vhost_task_start ( ) .
*/
2023-06-01 21:32:32 +03:00
struct vhost_task * vhost_task_create ( bool ( * fn ) ( void * ) , void * arg ,
2023-03-11 01:03:30 +03:00
const char * name )
{
struct kernel_clone_args args = {
2023-06-01 21:32:32 +03:00
. flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM |
CLONE_THREAD | CLONE_SIGHAND ,
2023-03-11 01:03:30 +03:00
. exit_signal = 0 ,
. fn = vhost_task_fn ,
. name = name ,
. user_worker = 1 ,
. no_files = 1 ,
} ;
struct vhost_task * vtsk ;
struct task_struct * tsk ;
vtsk = kzalloc ( sizeof ( * vtsk ) , GFP_KERNEL ) ;
if ( ! vtsk )
return NULL ;
init_completion ( & vtsk - > exited ) ;
vtsk - > data = arg ;
vtsk - > fn = fn ;
args . fn_arg = vtsk ;
tsk = copy_process ( NULL , 0 , NUMA_NO_NODE , & args ) ;
if ( IS_ERR ( tsk ) ) {
kfree ( vtsk ) ;
return NULL ;
}
vtsk - > task = tsk ;
return vtsk ;
}
EXPORT_SYMBOL_GPL ( vhost_task_create ) ;
/**
* vhost_task_start - start a vhost_task created with vhost_task_create
* @ vtsk : vhost_task to wake up
*/
void vhost_task_start ( struct vhost_task * vtsk )
{
wake_up_new_task ( vtsk - > task ) ;
}
EXPORT_SYMBOL_GPL ( vhost_task_start ) ;