/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
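
/*
 * Illustrative sketch (not part of the driver): the helpers above pair up
 * like kref_get()/kref_put(). The caller context below is hypothetical;
 * only vmw_resource_reference()/vmw_resource_unreference() are real:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp; the resource cannot be freed underneath us ...
 *
 *	vmw_resource_unreference(&tmp);		// also sets tmp to NULL
 */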

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 *            the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free)(struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy)(struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
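
/*
 * A minimal sketch of the intended constructor pattern, assuming a
 * hypothetical resource type: vmw_foo_free(), vmw_foo_destroy() and
 * foo_func are illustrative names, not driver symbols.
 *
 *	ret = vmw_resource_init(dev_priv, res, true, vmw_foo_free, &foo_func);
 *	if (unlikely(ret != 0)) {
 *		vmw_foo_free(res);
 *		return ret;
 *	}
 *	... make the hardware aware of the resource ...
 *	vmw_resource_activate(res, vmw_foo_destroy);
 */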

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 *         a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
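
/*
 * Hedged usage sketch (not driver code): an ioctl-style caller would
 * dispatch on which out-pointer got filled in. The surrounding variables
 * are assumptions for illustration only.
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret)
 *		return ret;
 *	if (surf)
 *		... @handle named a surface ...
 *	else
 *		... @handle named a buffer object ...
 */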

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}
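
/*
 * Worked example of the rounding above, assuming PAGE_SIZE == 4096
 * (so PAGE_MASK == ~4095UL): a backup_size of 5000 becomes
 * (5000 + 4095) & ~4095 == 8192, i.e. two whole pages. It is the same
 * computation as PAGE_ALIGN(res->backup_size).
 */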

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @switch_backup: Backup buffer has been switched.
 * @new_backup: Pointer to the new backup buffer if command submission
 *              switched backup buffers. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);
			lockdep_assert_held(&new_backup->base.resv->lock.base);
			list_add_tail(&res->mob_head, &new_backup->res_list);
		} else {
			res->backup = NULL;
		}
	}
	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
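
/*
 * Sketch of the reserve/unreserve pairing, under the assumption of a
 * simple caller that does not switch backup buffers (so switch_backup is
 * false and new_backup is NULL):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	... reserve the backup bo, call vmw_resource_validate(), submit ...
 *	vmw_resource_unreserve(res, false, NULL, 0UL);
 */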

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long)res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct vmw_resource *res, *next;
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.shared = false
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
		if (!res->func->unbind)
			continue;

		(void) res->func->unbind(res, true, &val_buf);
		res->backup_dirty = true;
		res->res_dirty = false;
		list_del_init(&res->mob_head);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for "
			  "query MOB read back.\n");
		return -ENOMEM;
	}

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);

			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
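
/*
 * Hedged example of the pin/unpin pattern: vmw_resource_pin() and
 * vmw_resource_unpin() are meant to bracket a period during which the
 * resource id must stay stable. The caller context is illustrative only.
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... res->id is stable and the resource cannot be evicted here ...
 *	vmw_resource_unpin(res);
 */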

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}