2009-12-11 19:24:15 +10:00
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
2012-10-02 18:01:07 +01:00
# include <drm/drmP.h>
2009-12-11 19:24:15 +10:00
2011-03-09 14:22:19 +01:00
# include <linux/ktime.h>
# include <linux/hrtimer.h>
2016-10-25 13:00:45 +01:00
# include <trace/events/dma_fence.h>
2011-03-09 14:22:19 +01:00
2015-11-08 11:28:26 +10:00
# include <nvif/cl826e.h>
2014-08-10 04:10:25 +10:00
# include <nvif/notify.h>
# include <nvif/event.h>
2016-05-20 09:22:55 +10:00
# include "nouveau_drv.h"
2009-12-11 19:24:15 +10:00
# include "nouveau_dma.h"
2012-07-20 08:17:34 +10:00
# include "nouveau_fence.h"
2009-12-11 19:24:15 +10:00
2016-10-25 13:00:45 +01:00
static const struct dma_fence_ops nouveau_fence_ops_uevent ;
static const struct dma_fence_ops nouveau_fence_ops_legacy ;
2014-01-09 11:03:11 +01:00
static inline struct nouveau_fence *
2016-10-25 13:00:45 +01:00
from_fence ( struct dma_fence * fence )
2014-01-09 11:03:11 +01:00
{
return container_of ( fence , struct nouveau_fence , base ) ;
}
static inline struct nouveau_fence_chan *
nouveau_fctx ( struct nouveau_fence * fence )
{
return container_of ( fence - > base . lock , struct nouveau_fence_chan , lock ) ;
}
2013-05-07 09:48:30 +10:00
2014-12-01 19:11:06 +10:00
/*
 * Signal one fence and unlink it from its context's pending list.
 *
 * Caller must hold fctx->lock (dma_fence_signal_locked() requires the
 * fence lock held).  Returns nonzero when this fence held the last
 * uevent notifier reference, i.e. the caller should now
 * nvif_notify_put(&fctx->notify).
 */
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	/* Publish "no channel" to lockless readers (nouveau_fence_is_signaled). */
	rcu_assign_pointer(fence->channel, NULL);

	/* USER_BITS is set by nouveau_fence_enable_signaling(); balance its ref. */
	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	/* Drop the pending-list reference taken in nouveau_fence_emit(). */
	dma_fence_put(&fence->base);
	return drop;
}
static struct nouveau_fence *
2016-10-25 13:00:45 +01:00
nouveau_local_fence ( struct dma_fence * fence , struct nouveau_drm * drm ) {
2014-01-09 11:03:11 +01:00
struct nouveau_fence_priv * priv = ( void * ) drm - > fence ;
if ( fence - > ops ! = & nouveau_fence_ops_legacy & &
fence - > ops ! = & nouveau_fence_ops_uevent )
return NULL ;
if ( fence - > context < priv - > context_base | |
fence - > context > = priv - > context_base + priv - > contexts )
return NULL ;
return from_fence ( fence ) ;
2013-05-07 09:48:30 +10:00
}
2012-04-30 13:55:29 +10:00
/*
 * Tear down a channel's fence context: force-signal everything still
 * pending, shut down the uevent notifier, and wait out concurrent RCU
 * readers of fence->channel before the channel itself goes away.
 */
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		/* nonzero return: last uevent reference was just dropped */
		if (nouveau_fence_signal(fence))
			nvif_notify_put(&fctx->notify);
	}
	spin_unlock_irq(&fctx->lock);

	nvif_notify_fini(&fctx->notify);
	fctx->dead = 1;	/* get_timeline_name now reports "dead channel" */

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}
2014-09-29 10:06:18 +02:00
/* kref release callback: frees the fence channel context. */
static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	struct nouveau_fence_chan *fctx =
		container_of(fence_ref, struct nouveau_fence_chan, fence_ref);

	kfree(fctx);
}
/*
 * Drop the creator's reference on @fctx; the context is freed once the
 * last fence emitted on it has also been released (see
 * nouveau_fence_release()).
 */
void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
2014-12-01 19:11:06 +10:00
static int
2014-01-09 11:03:11 +01:00
nouveau_fence_update ( struct nouveau_channel * chan , struct nouveau_fence_chan * fctx )
{
struct nouveau_fence * fence ;
2014-12-01 19:11:06 +10:00
int drop = 0 ;
2014-01-09 11:03:11 +01:00
u32 seq = fctx - > read ( chan ) ;
while ( ! list_empty ( & fctx - > pending ) ) {
fence = list_entry ( fctx - > pending . next , typeof ( * fence ) , head ) ;
if ( ( int ) ( seq - fence - > base . seqno ) < 0 )
2014-12-01 19:11:06 +10:00
break ;
2014-01-09 11:03:11 +01:00
2014-12-01 19:11:06 +10:00
drop | = nouveau_fence_signal ( fence ) ;
2014-01-09 11:03:11 +01:00
}
2014-12-01 19:11:06 +10:00
return drop ;
2014-01-09 11:03:11 +01:00
}
static int
nouveau_fence_wait_uevent_handler ( struct nvif_notify * notify )
{
struct nouveau_fence_chan * fctx =
container_of ( notify , typeof ( * fctx ) , notify ) ;
unsigned long flags ;
2014-12-01 19:11:06 +10:00
int ret = NVIF_NOTIFY_KEEP ;
2014-01-09 11:03:11 +01:00
spin_lock_irqsave ( & fctx - > lock , flags ) ;
if ( ! list_empty ( & fctx - > pending ) ) {
struct nouveau_fence * fence ;
2014-12-01 19:11:06 +10:00
struct nouveau_channel * chan ;
2014-01-09 11:03:11 +01:00
fence = list_entry ( fctx - > pending . next , typeof ( * fence ) , head ) ;
2014-12-01 19:11:06 +10:00
chan = rcu_dereference_protected ( fence - > channel , lockdep_is_held ( & fctx - > lock ) ) ;
if ( nouveau_fence_update ( fence - > channel , fctx ) )
ret = NVIF_NOTIFY_DROP ;
2014-01-09 11:03:11 +01:00
}
spin_unlock_irqrestore ( & fctx - > lock , flags ) ;
2014-12-01 19:11:06 +10:00
return ret ;
2012-04-30 13:55:29 +10:00
}
/*
 * Initialise the per-channel fence context: pick a dma_fence context id
 * from this device's range, name the timeline after the channel, and
 * (when the backend supports uevents) create the non-stall interrupt
 * notifier used to signal fences.
 */
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	/* Legacy backends poll; no notifier needed. */
	if (!priv->uevent)
		return;

	/* Notifier is toggled on demand by enable_signaling/signal paths. */
	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) { },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &fctx->notify);
	WARN_ON(ret);
}
2009-12-11 19:24:15 +10:00
2014-01-09 11:03:11 +01:00
/*
 * Deferred-work wrapper: runs func(data) from a workqueue once the
 * fence the callback is attached to has signaled.
 */
struct nouveau_fence_work {
	struct work_struct work;	/* queued by nouveau_fence_work_cb() */
	struct dma_fence_cb cb;		/* hook registered on the fence */
	void (*func)(void *);		/* user callback */
	void *data;			/* opaque argument for func */
};
2013-05-07 09:48:30 +10:00
static void
nouveau_fence_work_handler ( struct work_struct * kwork )
{
2014-01-09 11:03:11 +01:00
struct nouveau_fence_work * work = container_of ( kwork , typeof ( * work ) , work ) ;
2013-05-07 09:48:30 +10:00
work - > func ( work - > data ) ;
kfree ( work ) ;
}
2016-10-25 13:00:45 +01:00
static void nouveau_fence_work_cb ( struct dma_fence * fence , struct dma_fence_cb * cb )
2014-01-09 11:03:11 +01:00
{
struct nouveau_fence_work * work = container_of ( cb , typeof ( * work ) , cb ) ;
schedule_work ( & work - > work ) ;
}
2013-05-07 09:48:30 +10:00
void
2016-10-25 13:00:45 +01:00
nouveau_fence_work ( struct dma_fence * fence ,
2013-05-07 09:48:30 +10:00
void ( * func ) ( void * ) , void * data )
{
2014-01-09 11:03:11 +01:00
struct nouveau_fence_work * work ;
2013-05-07 09:48:30 +10:00
2016-10-25 13:00:45 +01:00
if ( dma_fence_is_signaled ( fence ) )
2014-01-09 11:03:11 +01:00
goto err ;
2013-05-07 09:48:30 +10:00
work = kmalloc ( sizeof ( * work ) , GFP_KERNEL ) ;
if ( ! work ) {
2014-01-09 11:03:15 +01:00
/*
* this might not be a nouveau fence any more ,
* so force a lazy wait here
*/
2014-04-02 17:14:48 +02:00
WARN_ON ( nouveau_fence_wait ( ( struct nouveau_fence * ) fence ,
2014-01-09 11:03:15 +01:00
true , false ) ) ;
2014-01-09 11:03:11 +01:00
goto err ;
2013-05-07 09:48:30 +10:00
}
2014-01-09 11:03:11 +01:00
INIT_WORK ( & work - > work , nouveau_fence_work_handler ) ;
2013-05-07 09:48:30 +10:00
work - > func = func ;
work - > data = data ;
2009-12-11 19:24:15 +10:00
2016-10-25 13:00:45 +01:00
if ( dma_fence_add_callback ( fence , & work - > cb , nouveau_fence_work_cb ) < 0 )
2014-01-09 11:03:11 +01:00
goto err_free ;
return ;
2012-03-21 13:51:03 +10:00
2014-01-09 11:03:11 +01:00
err_free :
kfree ( work ) ;
err :
func ( data ) ;
2009-12-11 19:24:15 +10:00
}
/*
 * Initialise @fence on @chan's timeline and emit it to the hardware.
 * On success the fence is appended to the context's pending list with
 * an extra reference held until it signals.  Returns the backend
 * emit() status.
 */
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel = chan;
	fence->timeout = jiffies + (15 * HZ);	/* busy-wait deadline */

	/* uevent-capable backends get interrupt-driven signaling ops. */
	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	/* Keep fctx alive until the last of its fences is released. */
	kref_get(&fctx->fence_ref);

	trace_dma_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);	/* pending-list reference */
		spin_lock_irq(&fctx->lock);

		/* Reap already-completed fences before queueing this one. */
		if (nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}
/*
 * Check whether @fence has signaled, first opportunistically processing
 * its channel's pending list so completed fences get reaped.
 */
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	/* Only our own fences carry a channel/pending list to update. */
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		/* Fast path: already signaled, no list walk needed. */
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		/* channel is cleared by nouveau_fence_signal() once done. */
		chan = rcu_dereference_protected(fence->channel,
						 lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_notify_put(&fctx->notify);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}
2014-01-09 11:03:11 +01:00
/*
 * dma_fence_ops.wait for backends without uevents: poll
 * nouveau_fence_done() with exponentially growing hrtimer sleeps,
 * capped at 1ms.  @wait is a jiffies timeout or MAX_SCHEDULE_TIMEOUT;
 * returns remaining jiffies on success, 0 on timeout, or -ERESTARTSYS
 * when an interruptible wait catches a signal.
 */
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;	/* start at 1us */
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		/* Back off: double the sleep each round, cap at 1ms. */
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}
2014-01-09 11:03:11 +01:00
/*
 * Spin (without sleeping) until the fence completes, its 15s timeout
 * expires (-EBUSY), or a signal arrives during an interruptible wait
 * (-ERESTARTSYS).
 */
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
					 : TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
2012-04-30 13:55:29 +10:00
int
2014-01-09 11:03:11 +01:00
nouveau_fence_wait ( struct nouveau_fence * fence , bool lazy , bool intr )
{
long ret ;
if ( ! lazy )
return nouveau_fence_wait_busy ( fence , intr ) ;
2016-10-25 13:00:45 +01:00
ret = dma_fence_wait_timeout ( & fence - > base , intr , 15 * HZ ) ;
2014-01-09 11:03:11 +01:00
if ( ret < 0 )
return ret ;
else if ( ! ret )
return - EBUSY ;
else
return 0 ;
}
int
2014-09-16 11:15:07 +02:00
nouveau_fence_sync ( struct nouveau_bo * nvbo , struct nouveau_channel * chan , bool exclusive , bool intr )
2012-04-30 13:55:29 +10:00
{
2013-02-14 13:20:17 +10:00
struct nouveau_fence_chan * fctx = chan - > fence ;
2016-10-25 13:00:45 +01:00
struct dma_fence * fence ;
2014-01-09 11:03:11 +01:00
struct reservation_object * resv = nvbo - > bo . resv ;
struct reservation_object_list * fobj ;
2014-04-09 16:19:30 +02:00
struct nouveau_fence * f ;
2014-01-09 11:03:11 +01:00
int ret = 0 , i ;
2014-04-09 16:19:30 +02:00
if ( ! exclusive ) {
ret = reservation_object_reserve_shared ( resv ) ;
if ( ret )
return ret ;
}
fobj = reservation_object_get_list ( resv ) ;
2014-04-02 17:14:48 +02:00
fence = reservation_object_get_excl ( resv ) ;
2014-01-09 11:03:11 +01:00
2014-04-09 16:19:30 +02:00
if ( fence & & ( ! exclusive | | ! fobj | | ! fobj - > shared_count ) ) {
struct nouveau_channel * prev = NULL ;
2014-12-01 19:11:06 +10:00
bool must_wait = true ;
2012-04-30 13:55:29 +10:00
2014-04-09 16:19:30 +02:00
f = nouveau_local_fence ( fence , chan - > drm ) ;
2014-12-01 19:11:06 +10:00
if ( f ) {
rcu_read_lock ( ) ;
prev = rcu_dereference ( f - > channel ) ;
if ( prev & & ( prev = = chan | | fctx - > sync ( f , prev , chan ) = = 0 ) )
must_wait = false ;
rcu_read_unlock ( ) ;
}
2014-04-09 16:19:30 +02:00
2014-12-01 19:11:06 +10:00
if ( must_wait )
2016-10-25 13:00:45 +01:00
ret = dma_fence_wait ( fence , intr ) ;
2012-04-30 13:55:29 +10:00
2014-01-09 11:03:11 +01:00
return ret ;
2014-04-09 16:19:30 +02:00
}
2012-04-30 13:55:29 +10:00
2014-04-09 16:19:30 +02:00
if ( ! exclusive | | ! fobj )
2014-01-09 11:03:11 +01:00
return ret ;
for ( i = 0 ; i < fobj - > shared_count & & ! ret ; + + i ) {
2014-04-09 16:19:30 +02:00
struct nouveau_channel * prev = NULL ;
2014-12-01 19:11:06 +10:00
bool must_wait = true ;
2014-04-09 16:19:30 +02:00
2014-01-09 11:03:11 +01:00
fence = rcu_dereference_protected ( fobj - > shared [ i ] ,
reservation_object_held ( resv ) ) ;
2014-04-09 16:19:30 +02:00
f = nouveau_local_fence ( fence , chan - > drm ) ;
2014-12-01 19:11:06 +10:00
if ( f ) {
rcu_read_lock ( ) ;
prev = rcu_dereference ( f - > channel ) ;
if ( prev & & ( prev = = chan | | fctx - > sync ( f , prev , chan ) = = 0 ) )
must_wait = false ;
rcu_read_unlock ( ) ;
}
2014-04-09 16:19:30 +02:00
2014-12-01 19:11:06 +10:00
if ( must_wait )
2016-10-25 13:00:45 +01:00
ret = dma_fence_wait ( fence , intr ) ;
2014-01-09 11:03:11 +01:00
}
return ret ;
2012-04-30 13:30:00 +10:00
}
void
nouveau_fence_unref ( struct nouveau_fence * * pfence )
{
if ( * pfence )
2016-10-25 13:00:45 +01:00
dma_fence_put ( & ( * pfence ) - > base ) ;
2012-04-30 13:30:00 +10:00
* pfence = NULL ;
}
int
2013-02-14 13:43:21 +10:00
nouveau_fence_new ( struct nouveau_channel * chan , bool sysmem ,
struct nouveau_fence * * pfence )
2012-04-30 13:30:00 +10:00
{
struct nouveau_fence * fence ;
int ret = 0 ;
2009-12-11 19:24:15 +10:00
2012-07-19 10:51:42 +10:00
if ( unlikely ( ! chan - > fence ) )
2012-04-30 13:55:29 +10:00
return - ENODEV ;
2012-04-30 13:30:00 +10:00
fence = kzalloc ( sizeof ( * fence ) , GFP_KERNEL ) ;
if ( ! fence )
return - ENOMEM ;
2013-02-14 13:43:21 +10:00
fence - > sysmem = sysmem ;
2012-04-30 13:30:00 +10:00
2013-01-15 18:19:49 +01:00
ret = nouveau_fence_emit ( fence , chan ) ;
if ( ret )
nouveau_fence_unref ( & fence ) ;
2012-04-30 13:30:00 +10:00
* pfence = fence ;
2009-12-11 19:24:15 +10:00
return ret ;
}
2014-01-09 11:03:11 +01:00
2016-10-25 13:00:45 +01:00
/* dma_fence_ops.get_driver_name: constant driver identifier. */
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}
2016-10-25 13:00:45 +01:00
static const char * nouveau_fence_get_timeline_name ( struct dma_fence * f )
2014-01-09 11:03:11 +01:00
{
struct nouveau_fence * fence = from_fence ( f ) ;
struct nouveau_fence_chan * fctx = nouveau_fctx ( fence ) ;
2014-12-01 19:11:06 +10:00
return ! fctx - > dead ? fctx - > name : " dead channel " ;
2014-01-09 11:03:11 +01:00
}
/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	/* channel is cleared under RCU once the fence has been signaled. */
	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		/* signed compare tolerates sequence-number wraparound */
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}
2016-10-25 13:00:45 +01:00
/*
 * .enable_signaling for the legacy (no-uevent) path.  Called by the
 * dma_fence core with the fence lock (== fctx->lock) held.  Returns
 * false when the fence is already done so the core signals it at once.
 */
static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		/* Already done: unlink from pending and drop that reference. */
		list_del(&fence->head);
		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}
2016-10-25 13:00:45 +01:00
static void nouveau_fence_release ( struct dma_fence * f )
2014-09-29 10:06:18 +02:00
{
struct nouveau_fence * fence = from_fence ( f ) ;
struct nouveau_fence_chan * fctx = nouveau_fctx ( fence ) ;
kref_put ( & fctx - > fence_ref , nouveau_fence_context_put ) ;
2016-10-25 13:00:45 +01:00
dma_fence_free ( & fence - > base ) ;
2014-09-29 10:06:18 +02:00
}
2016-10-25 13:00:45 +01:00
/* Fence ops for backends without uevent support: waits must poll. */
static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};
2016-10-25 13:00:45 +01:00
/*
 * .enable_signaling for the uevent path: take a notifier reference so
 * the non-stall interrupt fires, then run the common done-check.
 * Called by the dma_fence core with the fence lock (== fctx->lock) held.
 */
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	/* First enabler turns the uevent notifier on. */
	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		/* nouveau_fence_signal() will balance the ref we took. */
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}
2016-10-25 13:00:45 +01:00
static const struct dma_fence_ops nouveau_fence_ops_uevent = {
2014-01-09 11:03:11 +01:00
. get_driver_name = nouveau_fence_get_get_driver_name ,
. get_timeline_name = nouveau_fence_get_timeline_name ,
. enable_signaling = nouveau_fence_enable_signaling ,
. signaled = nouveau_fence_is_signaled ,
2016-10-25 13:00:45 +01:00
. wait = dma_fence_default_wait ,
2014-01-09 11:03:11 +01:00
. release = NULL
} ;