/**************************************************************************
 *
 * Copyright © 2011 - 2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
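
/**
 * struct vmw_fence_manager - Per-device bookkeeping for fence objects.
 *
 * @num_fence_objects: Number of fence objects currently on @fence_list.
 * @dev_priv: Back-pointer to the device private structure.
 * @lock: Protects the lists and counters below and also serves as the
 * base lock of all fences created by this manager.
 * @fence_list: All fence objects that have not yet signaled.
 * @work: Executes fence action cleanup outside of atomic context.
 * @user_fence_size: Accounting size of a struct vmw_user_fence.
 * @fence_size: Accounting size of a struct vmw_fence_obj.
 * @event_fence_action_size: Accounting size of a struct vmw_event_fence_action.
 * @fifo_down: True while the FIFO is down; fence creation then fails with -EBUSY.
 * @cleanup_list: Actions whose cleanup callbacks are pending in @work.
 * @pending_actions: Per-type count of actions attached to unsignaled fences.
 * @goal_irq_mutex: Serializes switching the fence goal irq on and off.
 * @goal_irq_on: Protected by @goal_irq_mutex.
 * @seqno_valid: Protected by @lock, and may not be set to true
 * without the @goal_irq_mutex held.
 * @ctx: Fence context used for all fences created by this manager.
 */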
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};
/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used to link this action into the file private's
 * list of pending fence events.
 * @event: A pointer to the pending event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
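
/*
 * The base fence lock of every fence created here is the fence manager's
 * lock, so the manager can be recovered from any of its fences with a
 * simple container_of().
 */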
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
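
/**
 * vmw_fence_obj_destroy - Final fence release callback.
 *
 * @f: The base fence embedded in a struct vmw_fence_obj.
 *
 * Unlinks the fence from the manager's fence list under the manager lock
 * and then hands off to the fence's type-specific destructor.
 */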
static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}
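
/**
 * vmw_fence_enable_signaling - fence_ops::enable_signaling callback.
 *
 * Returns false if the device seqno read from FIFO memory has already
 * passed the fence seqno (the unsigned subtraction handles 32-bit seqno
 * wrap-around), so no irq-driven signaling is needed. Otherwise it pings
 * the host and returns true.
 */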
static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);
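
/**
 * vmw_fence_wait - fence_ops::wait callback.
 *
 * Adds a wake-up callback to the fence, registers a seqno waiter with the
 * device and then sleeps in a loop, re-reading the device seqno through
 * __vmw_fences_update() on every wake-up, until the fence signals, the
 * timeout expires or, for interruptible waits, a signal is pending.
 */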
static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */
		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}
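
/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager, or NULL if allocation failed. The manager
 * starts with @fifo_down set, so fence creation is refused until
 * vmw_fence_fifo_up() is called.
 */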
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}
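
/**
 * vmw_fence_obj_init - Initialize a preallocated fence object.
 *
 * @fman: Pointer to the fence manager.
 * @fence: Fence object to initialize.
 * @seqno: Device seqno associated with the fence.
 * @destroy: Type-specific destructor called on final release.
 *
 * Returns 0 on success, or -EBUSY if the FIFO is down and no new fences
 * may be created.
 */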
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}
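
/**
 * vmw_fences_perform_actions - Run seq_passed callbacks for a list of actions.
 *
 * @fman: Pointer to the fence manager.
 * @list: List of actions whose fences have signaled.
 *
 * Called with the fence manager lock held. Each action's seq_passed callback
 * is invoked and the action is then moved to the cleanup list for the worker.
 */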
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */
		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}
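
/**
 * __vmw_fences_update - Signal fences whose seqno has passed.
 *
 * @fman: Pointer to the fence manager.
 *
 * Called with the fence manager lock held. Reads the current device seqno,
 * signals and unlinks all fences that it has passed, runs their attached
 * actions, and reruns once if updating the fence goal raced with the device.
 */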
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}
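
/**
 * vmw_fence_create - Allocate and initialize a kernel fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: Device seqno associated with the fence.
 * @p_fence: Assigned the new fence object on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * returned by vmw_fence_obj_init().
 */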
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}
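
/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * @file_priv: The file on whose behalf the fence is created.
 * @fman: Pointer to the fence manager.
 * @seqno: Device seqno associated with the fence.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the user-space handle of the fence on success.
 *
 * Charges the TTM memory accounting for the object, allocates a
 * struct vmw_user_fence and registers it as a TTM base object so that
 * user-space can reference it by handle.
 */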
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}
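
/**
 * vmw_fence_obj_wait_ioctl - Wait for a user-space fence object to signal.
 *
 * Converts the user-supplied microsecond timeout to jiffies (using a
 * shift-based approximation of dividing by 1000000 to avoid a 64-bit
 * division on 32-bit systems), looks up the fence by handle, waits for it,
 * and optionally drops the user-space reference on success.
 */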
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence - The fence object.
 * @action - The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);

}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, assigned the event time tv_sec value when the
 * fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the event time tv_usec
 * value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}