// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"
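
/*
 * Reference counting overview:
 *
 * host1x_syncpt_fence_enable_signaling() takes one fence reference for the
 * interrupt path and, if the fence has a timeout, a second one for the
 * timeout path. The 'signaling' flag arbitrates between the interrupt path
 * (host1x_fence_signal()) and the timeout path (do_fence_timeout()): the
 * path that sets the flag first signals the fence, while the other path
 * only drops its own, now unneeded, reference and returns.
 */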

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}
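
/*
 * dma_fence_ops::enable_signaling() callback. The dma_fence core calls this
 * with the fence's lock (&sp->fences.lock) held, hence the _locked variant
 * of the interrupt list insertion below.
 */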
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the above call,
	 * so we need to initialize all state used by signalling
	 * before it.
	 */

	return true;
}

static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
};
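
/*
 * Signal the fence from the interrupt path, after the fence has been removed
 * from the interrupt wait list. The fence's lock must be held by the caller,
 * since dma_fence_signal_locked() is used.
 */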
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * Already on timeout path, but we removed the fence before
		 * timeout path could, so drop interrupt path reference.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_locked(&f->base);
	dma_fence_put(&f->base);
}
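
/*
 * Timeout work handler. Runs after the 30 second timeout armed in
 * host1x_syncpt_fence_enable_signaling(), or immediately when
 * host1x_fence_cancel() schedules it. If the interrupt path has not already
 * claimed the fence, remove it from the interrupt wait list and signal it
 * with -ETIMEDOUT.
 */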
static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = (struct delayed_work *)work;
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);
	if (f->timeout)
		dma_fence_put(&f->base);
}
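
/**
 * host1x_fence_create() - create a dma_fence backed by a syncpoint threshold
 * @sp: syncpoint to monitor
 * @threshold: syncpoint value at which the fence signals
 * @timeout: if true, force-signal the fence with -ETIMEDOUT if it has not
 *           signalled within 30 seconds of signalling being enabled
 *
 * Returns the new fence, or ERR_PTR(-ENOMEM) on allocation failure.
 *
 * Illustrative caller-side sketch (hypothetical, not part of this file):
 *
 *	fence = host1x_fence_create(sp, threshold, true);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	ret = dma_fence_wait(fence, true);
 *	dma_fence_put(fence);
 */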
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				       bool timeout)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;

	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
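
/**
 * host1x_fence_cancel() - force-complete a syncpoint fence
 * @f: fence returned by host1x_fence_create()
 *
 * Schedule the fence's timeout work immediately and wait for it to finish,
 * so that the fence completes even if the syncpoint never reaches its
 * threshold. If the fence had not already signalled, it is signalled with
 * -ETIMEDOUT.
 */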
void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);