// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_drv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
	dma_resv_assert_held(res->backup->base.base.resv);

	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}
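
/*
 * Illustrative note (sketch, not compiled; process() is a placeholder
 * consumer): the tree built above keys a mob's resources on
 * @backup_offset, so an in-order traversal visits them in increasing
 * backing-store offset order:
 *
 *	struct rb_node *node;
 *
 *	for (node = rb_first(&backup->res_tree); node; node = rb_next(node))
 *		process(container_of(node, struct vmw_resource, mob_node));
 */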

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	dma_resv_assert_held(backup->base.base.resv);

	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
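
/*
 * Usage sketch (illustrative only): a caller that hands a resource
 * pointer to another context takes its own reference first and drops it
 * with vmw_resource_unreference(), which also clears the caller's
 * pointer as a use-after-free guard:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *	... use tmp ...
 *	vmw_resource_unreference(&tmp);   (tmp is NULL afterwards)
 */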

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
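
/*
 * Note (generic idr pattern, for illustration): idr_preload() above
 * preallocates in a sleepable GFP_KERNEL context so that the idr_alloc()
 * performed under the spinlock can safely use GFP_NOWAIT:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&lock);
 *	id = idr_alloc(&idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&lock);
 *	idr_preload_end();
 */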

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free)(struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
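
/*
 * Illustrative caller sketch (hypothetical resource type; the real
 * constructors live in e.g. vmwgfx_surface.c): an implementation embeds
 * a struct vmw_resource, initializes it with its own destructor and
 * function table, and sets up the hardware destructor afterwards:
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 *	if (unlikely(ret != 0)) {
 *		kfree(my_res);
 *		return ret;
 *	}
 *	my_res->res.hw_destroy = my_res_hw_destroy;
 */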

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle without taking a reference, and perform basic
 * type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Returns a pointer to the non-refcounted resource on success,
 * ERR_PTR(-ESRCH) if the handle can't be found and ERR_PTR(-EINVAL) if it
 * is associated with an incorrect resource type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
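
/*
 * Usage note (sketch; assumes the RCU-based noref lookup in vmwgfx's
 * ttm_object.c): the returned pointer is only valid until the paired
 * ttm_base_object_noref_release() call, and the caller should not sleep
 * in between. A caller that needs to hold on to the resource takes its
 * own reference:
 *
 *	res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile,
 *						    handle, converter);
 *	if (!IS_ERR(res))
 *		res = vmw_resource_reference(res);
 *	ttm_base_object_noref_release();
 */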

/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res:        The resource to make visible to the device.
 * @val_buf:    Information about a buffer possibly
 *              containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}
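
/*
 * Illustrative call sequence (simplified; the real flow lives in the
 * command-submission and pin paths): a resource is reserved around use,
 * and unreserving it is what puts it back on the eviction LRU:
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	if (ret)
 *		return ret;
 *	... validate and emit commands referencing res ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */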

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes.\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer.
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);

	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
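
/*
 * Illustrative call pattern (simplified from vmw_resource_pin() below):
 * the backup buffer, if any, is expected to be reserved by the caller
 * across validation:
 *
 *	ttm_bo_reserve(&res->backup->base, true, false, NULL);
 *	ret = vmw_resource_validate(res, true);
 *	ttm_bo_unreserve(&res->backup->base);
 */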

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	dma_resv_assert_held(vbo->base.base.resv);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv     = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid    = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}