/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
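
/**
 * struct vmw_user_context - Representation of a user-space context.
 * @base: The TTM base object backing the user-space handle.
 * @res: The context resource.
 * @cbs: The context binding state, tracking resources bound to the context.
 * @man: The command buffer managed resource manager for this context.
 * @cotables: Cotable resources; only used for DX contexts.
 * @cotable_lock: Protects @cotables against concurrent teardown.
 * @dx_query_mob: The MOB, if any, that receives DX query results.
 */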
struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
        struct vmw_ctx_binding_state *cbs;
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
        struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);

static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
        .res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
        &user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "legacy contexts",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
        .res_type = vmw_res_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_context_create,
        .destroy = vmw_gb_context_destroy,
        .bind = vmw_gb_context_bind,
        .unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
        .res_type = vmw_res_dx_context,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "dx contexts",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_dx_context_create,
        .destroy = vmw_dx_context_destroy,
        .bind = vmw_dx_context_bind,
        .unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */
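
/**
 * vmw_context_cotables_unref - Drop the context's references on its cotables.
 * @uctx: Pointer to the user context.
 *
 * Each cotable pointer is cleared under @cotable_lock before its reference
 * is dropped, so that concurrent readers never see a stale pointer.
 */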
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
        struct vmw_resource *res;
        int i;

        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[i];
                uctx->cotables[i] = NULL;
                spin_unlock(&uctx->cotable_lock);

                if (res)
                        vmw_resource_unreference(&res);
        }
}
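
/**
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 * @res: The context resource.
 *
 * For guest-backed and DX contexts this destroys the command buffer
 * resource manager, kills all bindings, calls the hardware destroy hook
 * and drops the cotable references. For legacy contexts a
 * SVGA_3D_CMD_CONTEXT_DESTROY command is emitted instead.
 */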
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd;

        if (res->func->destroy == vmw_gb_context_destroy ||
            res->func->destroy == vmw_dx_context_destroy) {
                mutex_lock(&dev_priv->cmdbuf_mutex);
                vmw_cmdbuf_res_man_destroy(uctx->man);
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_state_kill(uctx->cbs);
                (void) res->func->destroy(res);
                mutex_unlock(&dev_priv->binding_mutex);
                if (dev_priv->pinned_bo != NULL &&
                    !dev_priv->query_cid_valid)
                        __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
                mutex_unlock(&dev_priv->cmdbuf_mutex);
                vmw_context_cotables_unref(uctx);
                return;
        }

        vmw_execbuf_release_pinned_bo(dev_priv);
        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_dec(dev_priv);
}
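
/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to simply kfree() @res.
 */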
static int vmw_gb_context_init(struct vmw_private *dev_priv,
                               bool dx,
                               struct vmw_resource *res,
                               void (*res_free)(struct vmw_resource *res))
{
        int ret, i;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);

        res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
                            SVGA3D_CONTEXT_DATA_SIZE);
        ret = vmw_resource_init(dev_priv, res, true,
                                res_free,
                                dx ? &vmw_dx_context_func :
                                &vmw_gb_context_func);
        if (unlikely(ret != 0))
                goto out_err;

        if (dev_priv->has_mob) {
                uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
                if (IS_ERR(uctx->man)) {
                        ret = PTR_ERR(uctx->man);
                        uctx->man = NULL;
                        goto out_err;
                }
        }

        uctx->cbs = vmw_binding_state_alloc(dev_priv);
        if (IS_ERR(uctx->cbs)) {
                ret = PTR_ERR(uctx->cbs);
                goto out_err;
        }

        spin_lock_init(&uctx->cotable_lock);

        if (dx) {
                for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                        uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                              &uctx->res, i);
                        if (unlikely(IS_ERR(uctx->cotables[i]))) {
                                ret = PTR_ERR(uctx->cotables[i]);
                                goto out_cotables;
                        }
                }
        }

        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_cotables:
        vmw_context_cotables_unref(uctx);
out_err:
        if (res_free)
                res_free(res);
        else
                kfree(res);
        return ret;
}
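
/**
 * vmw_context_init - Initialize a context resource.
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor to call on error, or NULL to simply kfree() @res.
 * @dx: Whether the new context should be a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices; otherwise
 * a legacy context is defined directly in the device FIFO.
 */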
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free)(struct vmw_resource *res),
                            bool dx)
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        if (dev_priv->has_mob)
                return vmw_gb_context_init(dev_priv, dx, res, res_free);

        ret = vmw_resource_init(dev_priv, res, false,
                                res_free, &vmw_legacy_context_func);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a resource id.\n");
                goto out_early;
        }

        if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
                DRM_ERROR("Out of hw context ids.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;

out_early:
        if (res_free == NULL)
                kfree(res);
        else
                res_free(res);
        return ret;
}

/*
 * GB context.
 */
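
/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 * @res: The context resource.
 *
 * Allocates a device context id and emits a
 * SVGA_3D_CMD_DEFINE_GB_CONTEXT command. A no-op if the resource
 * already has an id.
 */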
static int vmw_gb_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
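
/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */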
static int vmw_gb_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}
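
/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 * @res: The context resource.
 * @readback: Whether to emit a readback command before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs the context's bindings, optionally reads the context state back
 * to the backup buffer, and fences the buffer.
 */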
static int vmw_gb_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_binding_state_scrub(uctx->cbs);

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;
        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
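
/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 * @res: The context resource.
 *
 * Emits a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the
 * context id.
 */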
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/*
 * DX context.
 */
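
/**
 * vmw_dx_context_create - Create a DX context on the device.
 * @res: The context resource.
 *
 * Allocates a device context id and emits a
 * SVGA_3D_CMD_DX_DEFINE_CONTEXT command. A no-op if the resource
 * already has an id.
 */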
static int vmw_dx_context_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDefineContext body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a context id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_fifo_resource_inc(dev_priv);
        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}
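
/**
 * vmw_dx_context_bind - Bind a DX context to its backup MOB.
 * @res: The context resource.
 * @val_buf: Validation buffer holding the reserved backup buffer object.
 */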
static int vmw_dx_context_bind(struct vmw_resource *res,
                               struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.validContents = res->backup_dirty;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                   bool readback)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);
        int i;

        vmw_binding_state_scrub(uctx->cbs);
        for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                struct vmw_resource *res;

                /* Avoid racing with ongoing cotable destruction. */
                spin_lock(&uctx->cotable_lock);
                res = uctx->cotables[vmw_cotable_scrub_order[i]];
                if (res)
                        res = vmw_resource_reference_unless_doomed(res);
                spin_unlock(&uctx->cotable_lock);

                if (!res)
                        continue;

                WARN_ON(vmw_cotable_scrub(res, readback));
                vmw_resource_unreference(&res);
        }
}
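
/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup MOB.
 * @res: The context resource.
 * @readback: Whether to read back context and query state before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings and cotables, optionally reads back pending query
 * results and the context state, and fences the backup buffer.
 */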
static int vmw_dx_context_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct ttm_buffer_object *bo = val_buf->bo;
        struct vmw_fence_obj *fence;
        struct vmw_user_context *uctx =
                container_of(res, struct vmw_user_context, res);
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackContext body;
        } *cmd1;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindContext body;
        } *cmd2;
        uint32_t submit_size;
        uint8_t *cmd;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        mutex_lock(&dev_priv->binding_mutex);
        vmw_dx_context_scrub_cotables(res, readback);

        if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
            readback) {
                WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
                if (vmw_query_readback_all(uctx->dx_query_mob))
                        DRM_ERROR("Failed to read back query states\n");
        }

        submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "unbinding.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd2 = (void *) cmd;
        if (readback) {
                cmd1 = (void *) cmd;
                cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
                cmd1->header.size = sizeof(cmd1->body);
                cmd1->body.cid = res->id;
                cmd2 = (void *) (&cmd1[1]);
        }
        cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
        cmd2->header.size = sizeof(cmd2->body);
        cmd2->body.cid = res->id;
        cmd2->body.mobid = SVGA3D_INVALID_ID;
        vmw_fifo_commit(dev_priv, submit_size);
        mutex_unlock(&dev_priv->binding_mutex);

        /*
         * Create a fence object and fence the backup buffer.
         */
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}
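
/**
 * vmw_dx_context_destroy - Destroy a DX context on the device.
 * @res: The context resource.
 *
 * Emits a SVGA_3D_CMD_DX_DESTROY_CONTEXT command and releases the
 * context id.
 */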
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXDestroyContext body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        if (dev_priv->query_cid == res->id)
                dev_priv->query_cid_valid = false;
        vmw_resource_release_id(res);
        vmw_fifo_resource_dec(dev_priv);

        return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
                container_of(res, struct vmw_user_context, res);
        struct vmw_private *dev_priv = res->dev_priv;

        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);

        (void) vmw_context_bind_dx_query(res, NULL);

        ttm_base_object_kfree(ctx, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
                container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
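
/**
 * vmw_context_define - Define a new context on behalf of user space.
 * @dev: Pointer to the drm device.
 * @data: The argument, a struct drm_vmw_context_arg.
 * @file_priv: Identifies the caller.
 * @dx: Whether to define a DX context.
 *
 * Allocates and initializes a user context together with its base
 * object, returning the new handle in @data on success.
 */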
static int vmw_context_define(struct drm_device *dev, void *data,
                              struct drm_file *file_priv, bool dx)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (!dev_priv->has_dx && dx) {
                DRM_ERROR("DX contexts not supported by device.\n");
                return -EINVAL;
        }

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of contexts anyway.
         */
        if (unlikely(vmw_user_context_size == 0))
                vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
                  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_context_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for context "
                                  "creation.\n");
                goto out_unlock;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_context_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = ctx->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
        struct drm_vmw_context_arg *rep = &arg->rep;

        switch (arg->req) {
        case drm_vmw_context_legacy:
                return vmw_context_define(dev, rep, file_priv, false);
        case drm_vmw_context_dx:
                return vmw_context_define(dev, rep, file_priv, true);
        default:
                break;
        }
        return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
        struct vmw_user_context *uctx =
                container_of(ctx, struct vmw_user_context, res);

        return vmw_binding_state_list(uctx->cbs);
}
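
/**
 * vmw_context_res_man - Return the command buffer managed resource manager
 * of a context.
 * @ctx: The context resource.
 */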
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->man;
}
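
/**
 * vmw_context_cotable - Return a referenced cotable of a context.
 * @ctx: The context resource.
 * @cotable_type: The cotable type.
 *
 * Returns ERR_PTR(-EINVAL) if @cotable_type is out of range.
 */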
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
                                         SVGACOTableType cotable_type)
{
        if (cotable_type >= SVGA_COTABLE_DX10_MAX)
                return ERR_PTR(-EINVAL);

        return vmw_resource_reference
                (container_of(ctx, struct vmw_user_context, res)->
                 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
        return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets the query MOB for the context. If @mob is NULL, then this function
 * will remove the association between the MOB and the context. This
 * function assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
                              struct vmw_dma_buffer *mob)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
                        vmw_dmabuf_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }

                return 0;
        }

        /* Can only have one MOB per context for queries */
        if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
                return -EINVAL;

        mob->dx_query_ctx = ctx_res;

        if (!uctx->dx_query_mob)
                uctx->dx_query_mob = vmw_dmabuf_reference(mob);

        return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);

        return uctx->dx_query_mob;
}