/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>
/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
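
/*
 * Example: a typical locked update path (an illustrative sketch only;
 * the buffer object "bo", its embedded "resv" member and the driver
 * fence "fence" are assumptions, and error handling is abbreviated):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = reservation_object_lock(&bo->resv, &ctx);
 *	if (!ret) {
 *		ret = reservation_object_reserve_shared(&bo->resv, 1);
 *		if (!ret)
 *			reservation_object_add_shared_fence(&bo->resv, fence);
 *		reservation_object_unlock(&bo->resv);
 *	}
 *	ww_acquire_fini(&ctx);
 *
 * Readers that cannot take the lock instead use the *_rcu() accessors
 * below, which detect concurrent writers via the seqcount and retry.
 */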

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/**
 * reservation_object_reserve_shared - Reserve space to add shared fences to
 * a reservation_object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before reservation_object_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int reservation_object_reserve_shared(struct reservation_object *obj,
				      unsigned int num_fences)
{
	struct reservation_object_list *old, *new;
	unsigned int i, j, k, max;

	reservation_object_assert_held(obj);

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	/*
	 * Unsignaled fences are compacted at the start of the new array,
	 * signaled ones are parked at the end so their references can be
	 * dropped once the new list is published.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  reservation_object_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;
	new->shared_max = max;

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, new);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < new->shared_max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  reservation_object_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

/**
 * reservation_object_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called first.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct dma_fence *fence)
{
	struct reservation_object_list *fobj;
	unsigned int i, count;

	dma_fence_get(fence);

	reservation_object_assert_held(obj);

	fobj = reservation_object_get_list(obj);
	count = fobj->shared_count;

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		struct dma_fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						      reservation_object_held(obj));
		if (old_fence->context == fence->context ||
		    dma_fence_is_signaled(old_fence)) {
			dma_fence_put(old_fence);
			goto replace;
		}
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	preempt_enable();
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

/**
 * reservation_object_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct dma_fence *fence)
{
	struct dma_fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	reservation_object_assert_held(obj);

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
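
/*
 * Example: publishing a write with an exclusive fence (an illustrative
 * sketch; "bo" and the driver fence "fence" are assumptions, and the
 * object is assumed to be locked already):
 *
 *	reservation_object_assert_held(&bo->resv);
 *	reservation_object_add_excl_fence(&bo->resv, fence);
 *
 * Note that installing an exclusive fence drops all shared slots, so a
 * subsequent reader only needs to wait on the new exclusive fence.
 */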

/**
 * reservation_object_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int reservation_object_copy_fences(struct reservation_object *dst,
				   struct reservation_object *src)
{
	struct reservation_object_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	size_t size;
	unsigned i;

	reservation_object_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		size = offsetof(typeof(*src_list), shared[shared_count]);
		rcu_read_unlock();

		dst_list = kmalloc(size, GFP_KERNEL);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		dst_list->shared_max = shared_count;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				kfree(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = reservation_object_get_list(dst);
	old = reservation_object_get_excl(dst);

	preempt_disable();
	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);
	preempt_enable();

	if (src_list)
		kfree_rcu(src_list, rcu);

	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(reservation_object_copy_fences);
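
/*
 * Example: mirroring the fence state of one buffer into another (an
 * illustrative sketch; "dst_bo" and "src_bo" are assumptions):
 *
 *	int ret;
 *
 *	ret = reservation_object_lock(&dst_bo->resv, NULL);
 *	if (ret)
 *		return ret;
 *	ret = reservation_object_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	reservation_object_unlock(&dst_bo->resv);
 *	return ret;
 *
 * A zero return means all unsignaled fences were carried over; -ENOMEM
 * is the only failure mode.
 */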

/**
 * reservation_object_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified, the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct dma_fence **pfence_excl,
				      unsigned *pshared_count,
				      struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct reservation_object_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}

			if (!pfence_excl && fence_excl) {
				shared[i] = fence_excl;
				fence_excl = NULL;
				++i;
				++shared_count;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	if (pfence_excl)
		*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
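
/*
 * Example: taking a snapshot of all fences and releasing it again (an
 * illustrative sketch; only the reservation object pointer "resv" is an
 * assumption):
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = reservation_object_get_fences_rcu(resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 *
 * Every returned fence is referenced, so the caller must drop each
 * reference and free the krealloc'd array; dma_fence_put(NULL) is safe
 * when no exclusive fence was present.
 */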

/**
 * reservation_object_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
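
/*
 * Example: a bounded wait for all fences before CPU access (an
 * illustrative sketch; "resv" and the 100 ms budget are assumptions):
 *
 *	long lret;
 *
 *	lret = reservation_object_wait_timeout_rcu(resv, true, true,
 *						   msecs_to_jiffies(100));
 *	if (lret == 0)
 *		return -EBUSY;
 *	if (lret < 0)
 *		return lret;
 */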

static inline int
reservation_object_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * reservation_object_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
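
/*
 * Example: a non-blocking busy query (an illustrative sketch; "resv" is
 * an assumption):
 *
 *	if (!reservation_object_test_signaled_rcu(resv, true))
 *		return -EBUSY;
 *	return 0;
 */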