// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/dma-buf/sync_file.c
 *
 * Copyright (C) 2012 Google, Inc.
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>

static const struct file_operations sync_file_fops;

static struct sync_file *sync_file_alloc(void)
{
	struct sync_file *sync_file;

	sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
	if (!sync_file)
		return NULL;

	sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
					     sync_file, 0);
	if (IS_ERR(sync_file->file))
		goto err;

	init_waitqueue_head(&sync_file->wq);

	INIT_LIST_HEAD(&sync_file->cb.node);

	return sync_file;

err:
	kfree(sync_file);
	return NULL;
}

static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct sync_file *sync_file;

	sync_file = container_of(cb, struct sync_file, cb);

	wake_up_all(&sync_file->wq);
}

/**
 * sync_file_create() - creates a sync file
 * @fence:	fence to add to the sync_file
 *
 * Creates a sync_file containing @fence. This function acquires an additional
 * reference of @fence for the newly-created &sync_file, if it succeeds. The
 * sync_file can be released with fput(sync_file->file). Returns the
 * sync_file or NULL in case of error.
 */
struct sync_file *sync_file_create(struct dma_fence *fence)
{
	struct sync_file *sync_file;

	sync_file = sync_file_alloc();
	if (!sync_file)
		return NULL;

	sync_file->fence = dma_fence_get(fence);

	return sync_file;
}
EXPORT_SYMBOL(sync_file_create);
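
/*
 * Illustrative sketch (not part of this driver): a typical caller wraps a
 * just-submitted job's fence in a sync_file and returns the fd to userspace.
 * The function and parameter names below are hypothetical.
 *
 *	static int my_driver_export_fence(struct dma_fence *fence, s32 __user *out_fd)
 *	{
 *		struct sync_file *sync_file;
 *		int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return fd;
 *
 *		sync_file = sync_file_create(fence);
 *		if (!sync_file) {
 *			put_unused_fd(fd);
 *			return -ENOMEM;
 *		}
 *
 *		if (put_user(fd, out_fd)) {
 *			fput(sync_file->file);
 *			put_unused_fd(fd);
 *			return -EFAULT;
 *		}
 *
 *		fd_install(fd, sync_file->file);
 *		return 0;
 *	}
 */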

static struct sync_file *sync_file_fdget(int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return NULL;

	if (file->f_op != &sync_file_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}

/**
 * sync_file_get_fence - get the fence related to the sync_file fd
 * @fd:		sync_file fd to get the fence from
 *
 * Ensures @fd references a valid sync_file and returns a fence that
 * represents all fences in the sync_file. On error NULL is returned.
 */
struct dma_fence *sync_file_get_fence(int fd)
{
	struct sync_file *sync_file;
	struct dma_fence *fence;

	sync_file = sync_file_fdget(fd);
	if (!sync_file)
		return NULL;

	fence = dma_fence_get(sync_file->fence);
	fput(sync_file->file);

	return fence;
}
EXPORT_SYMBOL(sync_file_get_fence);
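
/*
 * Illustrative sketch (not part of this driver): importing a fence fd passed
 * in from userspace, e.g. as an "in" fence for a job submission. The helper
 * name is hypothetical; the returned reference must be dropped with
 * dma_fence_put() once the caller is done with it.
 *
 *	static int my_driver_wait_on_fd(int in_fence_fd)
 *	{
 *		struct dma_fence *fence = sync_file_get_fence(in_fence_fd);
 *		long ret;
 *
 *		if (!fence)
 *			return -EINVAL;
 *
 *		ret = dma_fence_wait(fence, true);
 *		dma_fence_put(fence);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */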

/**
 * sync_file_get_name - get the name of the sync_file
 * @sync_file:		sync_file to get the fence from
 * @buf:		destination buffer to copy sync_file name into
 * @len:		available size of destination buffer.
 *
 * Each sync_file may have a name assigned either by the user (when merging
 * sync_files together) or created from the fence it contains. In the latter
 * case construction of the name is deferred until use, and so requires
 * sync_file_get_name().
 *
 * Returns: a string representing the name.
 */
char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
{
	if (sync_file->user_name[0]) {
		strlcpy(buf, sync_file->user_name, len);
	} else {
		struct dma_fence *fence = sync_file->fence;

		snprintf(buf, len, "%s-%s%llu-%lld",
			 fence->ops->get_driver_name(fence),
			 fence->ops->get_timeline_name(fence),
			 fence->context,
			 fence->seqno);
	}

	return buf;
}
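
/*
 * For example, a fence whose (hypothetical) driver reports the driver name
 * "my_drv" and the timeline name "ring0", with context 7 and seqno 42, gets
 * the generated name "my_drv-ring07-42" when no user name was assigned.
 */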

static int sync_file_set_fence(struct sync_file *sync_file,
			       struct dma_fence **fences, int num_fences)
{
	struct dma_fence_array *array;

	/*
	 * The references for the fences in the new sync_file are taken in
	 * add_fence() during the merge procedure, so for num_fences == 1
	 * we already own a new reference to the fence. For num_fences > 1
	 * we own the reference of the dma_fence_array creation.
	 */

	if (num_fences == 0) {
		sync_file->fence = dma_fence_get_stub();
		kfree(fences);

	} else if (num_fences == 1) {
		sync_file->fence = fences[0];
		kfree(fences);

	} else {
		array = dma_fence_array_create(num_fences, fences,
					       dma_fence_context_alloc(1),
					       1, false);
		if (!array)
			return -ENOMEM;

		sync_file->fence = &array->base;
	}

	return 0;
}

static void add_fence(struct dma_fence **fences,
		      int *i, struct dma_fence *fence)
{
	fences[*i] = fence;

	if (!dma_fence_is_signaled(fence)) {
		dma_fence_get(fence);
		(*i)++;
	}
}

/**
 * sync_file_merge() - merge two sync_files
 * @name:	name of new fence
 * @a:		sync_file a
 * @b:		sync_file b
 *
 * Creates a new sync_file which contains copies of all the fences in both
 * @a and @b. @a and @b remain valid, independent sync_files. Returns the
 * new merged sync_file or NULL in case of error.
 */
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
					 struct sync_file *b)
{
	struct dma_fence *a_fence, *b_fence, **fences;
	struct dma_fence_unwrap a_iter, b_iter;
	unsigned int index, num_fences;
	struct sync_file *sync_file;

	sync_file = sync_file_alloc();
	if (!sync_file)
		return NULL;

	num_fences = 0;
	dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
		++num_fences;
	dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
		++num_fences;

	if (num_fences > INT_MAX)
		goto err_free_sync_file;

	fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		goto err_free_sync_file;

	/*
	 * We can't guarantee that fences in both a and b are ordered, but it is
	 * still quite likely.
	 *
	 * So attempt to order the fences as we pass over them and merge fences
	 * with the same context.
	 */
	index = 0;
	for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
	     b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
	     a_fence || b_fence; ) {

		if (!b_fence) {
			add_fence(fences, &index, a_fence);
			a_fence = dma_fence_unwrap_next(&a_iter);

		} else if (!a_fence) {
			add_fence(fences, &index, b_fence);
			b_fence = dma_fence_unwrap_next(&b_iter);

		} else if (a_fence->context < b_fence->context) {
			add_fence(fences, &index, a_fence);
			a_fence = dma_fence_unwrap_next(&a_iter);

		} else if (b_fence->context < a_fence->context) {
			add_fence(fences, &index, b_fence);
			b_fence = dma_fence_unwrap_next(&b_iter);

		} else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
						a_fence->ops)) {
			add_fence(fences, &index, a_fence);
			a_fence = dma_fence_unwrap_next(&a_iter);
			b_fence = dma_fence_unwrap_next(&b_iter);

		} else {
			add_fence(fences, &index, b_fence);
			a_fence = dma_fence_unwrap_next(&a_iter);
			b_fence = dma_fence_unwrap_next(&b_iter);
		}
	}

	if (sync_file_set_fence(sync_file, fences, index) < 0)
		goto err_put_fences;

	strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
	return sync_file;

err_put_fences:
	while (index)
		dma_fence_put(fences[--index]);
	kfree(fences);

err_free_sync_file:
	fput(sync_file->file);
	return NULL;
}

static int sync_file_release(struct inode *inode, struct file *file)
{
	struct sync_file *sync_file = file->private_data;

	if (test_bit(POLL_ENABLED, &sync_file->flags))
		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
	dma_fence_put(sync_file->fence);
	kfree(sync_file);

	return 0;
}

static __poll_t sync_file_poll(struct file *file, poll_table *wait)
{
	struct sync_file *sync_file = file->private_data;

	poll_wait(file, &sync_file->wq, wait);

	if (list_empty(&sync_file->cb.node) &&
	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
					   fence_check_cb_func) < 0)
			wake_up_all(&sync_file->wq);
	}

	return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
}
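
/*
 * Illustrative sketch (not part of this driver): because sync_file_poll()
 * reports EPOLLIN once the fence has signaled, userspace can wait on a
 * sync_file fd with a plain poll(). The helper below is a hypothetical
 * example, not an existing API.
 *
 *	#include <errno.h>
 *	#include <poll.h>
 *
 *	static int wait_sync_file(int fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		int ret = poll(&pfd, 1, timeout_ms);
 *
 *		if (ret < 0)
 *			return -errno;
 *		if (ret == 0)
 *			return -ETIME;
 *		return 0;
 *	}
 */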

static long sync_file_ioctl_merge(struct sync_file *sync_file,
				  unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_file *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	if (data.flags || data.pad) {
		err = -EINVAL;
		goto err_put_fd;
	}

	fence2 = sync_file_fdget(data.fd2);
	if (!fence2) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_file_merge(data.name, sync_file, fence2);
	if (!fence3) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	fd_install(fd, fence3->file);
	fput(fence2->file);
	return 0;

err_put_fence3:
	fput(fence3->file);

err_put_fence2:
	fput(fence2->file);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
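
/*
 * Illustrative sketch (not part of this driver): merging two fence fds from
 * userspace with SYNC_IOC_MERGE, which is serviced by the handler above.
 * The helper name is hypothetical and error handling is abbreviated; the
 * returned value is the fd of the new merged sync_file.
 *
 *	#include <errno.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sync_file.h>
 *
 *	static int merge_sync_files(int fd1, int fd2)
 *	{
 *		struct sync_merge_data data = { .fd2 = fd2 };
 *
 *		strncpy(data.name, "merged", sizeof(data.name) - 1);
 *		if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
 *			return -errno;
 *
 *		return data.fence;
 *	}
 */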

static int sync_fill_fence_info(struct dma_fence *fence,
				struct sync_fence_info *info)
{
	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));

	info->status = dma_fence_get_status(fence);
	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
		cpu_relax();
	info->timestamp_ns =
		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
		ktime_to_ns(fence->timestamp) :
		ktime_set(0, 0);

	return info->status;
}

static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
				       unsigned long arg)
{
	struct sync_fence_info *fence_info = NULL;
	struct dma_fence_unwrap iter;
	struct sync_file_info info;
	unsigned int num_fences;
	struct dma_fence *fence;
	int ret;
	__u32 size;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	if (info.flags || info.pad)
		return -EINVAL;

	num_fences = 0;
	dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
		++num_fences;

	/*
	 * Passing num_fences = 0 means that userspace doesn't want to
	 * retrieve any sync_fence_info. If num_fences = 0 we skip filling
	 * sync_fence_info and return the actual number of fences on
	 * info->num_fences.
	 */
	if (!info.num_fences) {
		info.status = dma_fence_get_status(sync_file->fence);
		goto no_fences;
	} else {
		info.status = 1;
	}

	if (info.num_fences < num_fences)
		return -EINVAL;

	size = num_fences * sizeof(*fence_info);
	fence_info = kzalloc(size, GFP_KERNEL);
	if (!fence_info)
		return -ENOMEM;

	num_fences = 0;
	dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
		int status;

		status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
		info.status = info.status <= 0 ? info.status : status;
	}

	if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
			 size)) {
		ret = -EFAULT;
		goto out;
	}

no_fences:
	sync_file_get_name(sync_file, info.name, sizeof(info.name));
	info.num_fences = num_fences;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(fence_info);

	return ret;
}
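
/*
 * Illustrative sketch (not part of this driver): the usual userspace pattern
 * for SYNC_IOC_FILE_INFO is two calls, first with num_fences = 0 to learn how
 * many fences the sync_file carries, then again with a buffer of that size.
 * The helper name is hypothetical and error handling is abbreviated.
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sync_file.h>
 *
 *	static int dump_sync_file(int fd)
 *	{
 *		struct sync_file_info info = { 0 };
 *		struct sync_fence_info *fences;
 *
 *		if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0)
 *			return -errno;
 *
 *		fences = calloc(info.num_fences, sizeof(*fences));
 *		if (!fences)
 *			return -ENOMEM;
 *
 *		info.sync_fence_info = (uintptr_t)fences;
 *		if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0) {
 *			free(fences);
 *			return -errno;
 *		}
 *
 *		free(fences);
 *		return 0;
 *	}
 */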

static long sync_file_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct sync_file *sync_file = file->private_data;

	switch (cmd) {
	case SYNC_IOC_MERGE:
		return sync_file_ioctl_merge(sync_file, arg);

	case SYNC_IOC_FILE_INFO:
		return sync_file_ioctl_fence_info(sync_file, arg);

	default:
		return -ENOTTY;
	}
}

static const struct file_operations sync_file_fops = {
	.release = sync_file_release,
	.poll = sync_file_poll,
	.unlocked_ioctl = sync_file_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};