// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011 - 2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending struct drm_pending_event to be delivered.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
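
/**
 * vmw_fence_obj_destroy - dma_fence release callback for vmw fence objects.
 *
 * @f: The embedded struct dma_fence.
 *
 * Removes the fence from the fence manager's list and hands it to the
 * fence's driver-specific destructor.
 */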
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}
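
/**
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * @f: The embedded struct dma_fence.
 *
 * Returns false if the fence seqno has already passed the seqno read
 * from the FIFO. Otherwise pings the host so that an ANY_FENCE irq
 * will eventually be raised, and returns true.
 */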
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);
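
/**
 * vmw_fence_wait - dma_fence wait callback.
 *
 * @f: The embedded struct dma_fence.
 * @intr: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Sleeps until the fence is signaled, the timeout expires or, for
 * interruptible waits, a signal is pending, polling the device seqno
 * via __vmw_fences_update() on each wakeup.
 */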
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}
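
/**
 * vmw_fence_manager_init - Create and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to a newly allocated struct vmw_fence_manager,
 * or NULL on allocation failure.
 */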
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}
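
/**
 * vmw_fence_manager_takedown - Cancel pending work and free a fence manager.
 *
 * @fman: Pointer to the fence manager to destroy.
 *
 * The fence and cleanup lists must be empty at this point.
 */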
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}
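
/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the manager.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno assigned to this fence.
 * @destroy: Driver-specific destructor called when the fence is released.
 *
 * Returns -EBUSY if the fifo is down, 0 otherwise.
 */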
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}
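
/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of fence actions.
 *
 * @fman: Pointer to the fence manager.
 * @list: List of actions whose fences have signaled.
 *
 * Called with the fence manager lock held. Cleanup of the actions is
 * deferred to the fence manager worker.
 */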
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}
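
/**
 * __vmw_fences_update - Signal fences whose seqno the device has passed.
 *
 * @fman: Pointer to the fence manager.
 *
 * Called with the fence manager lock held. Walks the fence list,
 * signals passed fences and runs their attached actions, then updates
 * the fence goal seqno and reruns if the device raced with that update.
 */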
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}
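
/**
 * vmw_fences_update - Locked wrapper around __vmw_fences_update().
 *
 * @fman: Pointer to the fence manager.
 */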
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}
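
/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence object to check.
 *
 * Updates the fence manager from the device seqno first, unless the
 * fence is already marked signaled.
 */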
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return 1;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}
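
/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence object to wait on.
 * @lazy: Unused; waiting is delegated to dma_fence_wait_timeout().
 * @interruptible: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY if the wait timed out, or a negative
 * error code otherwise.
 */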
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}
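
/**
 * vmw_fence_create - Create a kernel fence object with the given seqno.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno to associate with the new fence.
 * @p_fence: Output pointer assigned the new fence on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or the error
 * returned by vmw_fence_obj_init().
 */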
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}
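
/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * @file_priv: The caller's drm file.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno to associate with the new fence.
 * @p_fence: Output pointer assigned the new fence on success.
 * @p_handle: Output pointer assigned the ttm base object handle that
 * user-space can use to look up the fence.
 *
 * Like vmw_fence_create(), but also sets up a ttm base object so the
 * fence can be referenced from user-space, and charges the allocation
 * to the kernel memory space accounting.
 */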
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}
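
/**
 * vmw_fence_fifo_up - Re-enable fence object creation after a fifo down.
 *
 * @fman: Pointer to the fence manager.
 */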
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence - The fence object.
 * @action - The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}
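
/**
 * vmw_fence_event_ioctl - Ioctl that attaches a drm event to a fence.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_event_arg argument.
 * @file_priv: The calling file.
 *
 * Looks up or creates the fence identified by the argument, optionally
 * adds a user-space reference to it, and queues a
 * DRM_VMW_EVENT_FENCE_SIGNALED event to be sent when the fence seqno passes.
 */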
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}