/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
	uint8_t num_input_sig;
	uint8_t num_output_sig;
};

struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};
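
/**
 * struct vmw_dx_shader - Resource-private data for DX shaders.
 *
 * @res: The base resource this shader embeds.
 * @ctx: The DX context the shader belongs to.
 * @cotable: The DXSHADER cotable resource holding the shader entry.
 * @id: The user-space (and device) id of the shader.
 * @committed: Whether the shader create has been committed to the device.
 * @cotable_head: List head used to attach the shader to the cotable's
 * resource list.
 */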
struct vmw_dx_shader {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	struct vmw_resource *cotable;
	u32 id;
	bool committed;
	struct list_head cotable_head;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;
static size_t vmw_shader_dx_size;

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static int vmw_dx_shader_create(struct vmw_resource *res);
static int vmw_dx_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_dx_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
					enum vmw_cmdbuf_res_state state);
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);

static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;

static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};

static const struct vmw_res_func vmw_dx_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = false,
	.type_name = "dx shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_shader_create,
	/*
	 * The destroy callback is only called with a committed resource on
	 * context destroy, in which case we destroy the cotable anyway,
	 * so there's no need to destroy DX shaders separately.
	 */
	.destroy = NULL,
	.bind = vmw_dx_shader_bind,
	.unbind = vmw_dx_shader_unbind,
	.commit_notify = vmw_dx_shader_commit_notify,
};

/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}

/**
 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
 * struct vmw_dx_shader
 *
 * @res: Pointer to the struct vmw_resource.
 */
static inline struct vmw_dx_shader *
vmw_res_to_dx_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_dx_shader, res);
}
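
/**
 * vmw_hw_shader_destroy - Destroy a hardware shader.
 *
 * @res: Pointer to the shader resource.
 *
 * Calls the resource type's destroy callback if one is provided. DX shaders
 * have no destroy callback, so for those we simply mark the resource id as
 * invalid.
 */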
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	if (likely(res->func->destroy))
		(void) res->func->destroy(res);
	else
		res->id = -1;
}
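
/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the backing buffer.
 * @type: The SVGA3D shader type.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @byte_code: Optional buffer object holding the shader byte code.
 * @res_free: Destructor for the shader resource, or NULL to use kfree.
 *
 * On failure, the resource is freed using @res_free (or kfree) and the
 * error code is returned.
 */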
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      uint8_t num_input_sig,
			      uint8_t num_output_sig,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_gb_shader_func);
	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;
	shader->num_input_sig = num_input_sig;
	shader->num_output_sig = num_output_sig;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}

/*
 * GB shader code:
 */
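
/**
 * vmw_gb_shader_create - Create a guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Allocates a device id for the shader and issues an
 * SVGA_3D_CMD_DEFINE_GB_SHADER command. Returns 0 on success, a negative
 * error code otherwise.
 */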
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
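
/**
 * vmw_gb_shader_bind - Bind a MOB to a guest-backed shader.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer information for the backing MOB.
 *
 * Issues an SVGA_3D_CMD_BIND_GB_SHADER command binding the backing MOB to
 * the shader at the shader's backup offset.
 */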
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
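
/**
 * vmw_gb_shader_unbind - Unbind the MOB from a guest-backed shader.
 *
 * @res: The shader resource.
 * @readback: Unused.
 * @val_buf: Validation buffer information for the backing MOB.
 *
 * Issues an SVGA_3D_CMD_BIND_GB_SHADER command with an invalid MOB id and
 * fences the backing buffer.
 */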
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
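
/**
 * vmw_gb_shader_destroy - Destroy a guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Scrubs any bindings referring to the shader, issues an
 * SVGA_3D_CMD_DESTROY_GB_SHADER command and releases the device id.
 */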
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX shader code:
 */

/**
 * vmw_dx_shader_commit_notify - Notify that a shader operation has been
 * committed to hardware from a user-supplied command stream.
 *
 * @res: Pointer to the shader resource.
 * @state: Indicating whether a creation or removal has been committed.
 *
 */
static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
					enum vmw_cmdbuf_res_state state)
{
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (state == VMW_CMDBUF_RES_ADD) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_cotable_add_resource(shader->cotable,
					 &shader->cotable_head);
		shader->committed = true;
		res->id = shader->id;
		mutex_unlock(&dev_priv->binding_mutex);
	} else {
		mutex_lock(&dev_priv->binding_mutex);
		list_del_init(&shader->cotable_head);
		shader->committed = false;
		res->id = -1;
		mutex_unlock(&dev_priv->binding_mutex);
	}
}

/**
 * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
 *
 * @res: The shader resource
 *
 * This function reverts a scrub operation.
 */
static int vmw_dx_shader_unscrub(struct vmw_resource *res)
{
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd;

	if (!list_empty(&shader->cotable_head) || !shader->committed)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
				  shader->ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "scrubbing.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = shader->ctx->id;
	cmd->body.shid = shader->id;
	cmd->body.mobid = res->backup->base.mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);

	return 0;
}

/**
 * vmw_dx_shader_create - The DX shader create callback
 *
 * @res: The DX shader resource
 *
 * The create callback is called as part of resource validation and
 * makes sure that we unscrub the shader if it's previously been scrubbed.
 */
static int vmw_dx_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	int ret = 0;

	WARN_ON_ONCE(!shader->committed);

	if (!list_empty(&res->mob_head)) {
		mutex_lock(&dev_priv->binding_mutex);
		ret = vmw_dx_shader_unscrub(res);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	res->id = shader->id;
	return ret;
}

/**
 * vmw_dx_shader_bind - The DX shader bind callback
 *
 * @res: The DX shader resource
 * @val_buf: Pointer to the validate buffer.
 *
 */
static int vmw_dx_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_shader_unscrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	return 0;
}

/**
 * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
 *
 * @res: The shader resource
 *
 * This function unbinds a MOB from the DX shader without requiring the
 * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
 * However, once the driver eventually decides to unbind the MOB, it doesn't
 * need to access the context.
 */
static int vmw_dx_shader_scrub(struct vmw_resource *res)
{
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd;

	if (list_empty(&shader->cotable_head))
		return 0;

	WARN_ON_ONCE(!shader->committed);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "scrubbing.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = shader->ctx->id;
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	res->id = -1;
	list_del_init(&shader->cotable_head);

	return 0;
}

/**
 * vmw_dx_shader_unbind - The dx shader unbind callback.
 *
 * @res: The shader resource
 * @readback: Whether this is a readback unbind. Currently unused.
 * @val_buf: MOB buffer information.
 */
static int vmw_dx_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence;
	int ret;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_shader_scrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	if (ret)
		return ret;

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
 * DX shaders.
 *
 * @dev_priv: Pointer to device private structure.
 * @list: The list of cotable resources.
 * @readback: Whether the call was part of a readback unbind.
 *
 * Scrubs all shader MOBs so that any subsequent shader unbind or shader
 * destroy operation won't need to swap in the context.
 */
void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
				      struct list_head *list,
				      bool readback)
{
	struct vmw_dx_shader *entry, *next;

	WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex));

	list_for_each_entry_safe(entry, next, list, cotable_head) {
		WARN_ON(vmw_dx_shader_scrub(&entry->res));
		if (!readback)
			entry->committed = false;
	}
}

/**
 * vmw_dx_shader_res_free - The DX shader free callback
 *
 * @res: The shader resource
 *
 * Frees the DX shader resource and updates memory accounting.
 */
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);

	vmw_resource_unreference(&shader->cotable);
	kfree(shader);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
}

/**
 * vmw_dx_shader_add - Add a shader resource as a command buffer managed
 * resource.
 *
 * @man: The command buffer resource manager.
 * @ctx: Pointer to the context resource.
 * @user_key: The id used for this shader.
 * @shader_type: The shader type.
 * @list: The list of staged command buffer managed resources.
 */
int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
		      struct vmw_resource *ctx,
		      u32 user_key,
		      SVGA3dShaderType shader_type,
		      struct list_head *list)
{
	struct vmw_dx_shader *shader;
	struct vmw_resource *res;
	struct vmw_private *dev_priv = ctx->dev_priv;
	int ret;

	if (!vmw_shader_dx_size)
		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
				   false, true);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		return ret;
	}

	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
	if (!shader) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
		return -ENOMEM;
	}

	res = &shader->res;
	shader->ctx = ctx;
	shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER);
	shader->id = user_key;
	shader->committed = false;
	INIT_LIST_HEAD(&shader->cotable_head);
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_dx_shader_res_free, &vmw_dx_shader_func);
	if (ret)
		goto out_resource_init;

	/*
	 * The user_key name-space is not per shader type for DX shaders,
	 * so when hashing, use a single zero shader type.
	 */
	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
				 vmw_shader_key(user_key, 0),
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = shader->id;
	vmw_resource_activate(res, vmw_hw_shader_destroy);

out_resource_init:
	vmw_resource_unreference(&res);

	return ret;
}

/**
 * User-space shader management:
 */
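
/**
 * vmw_user_shader_base_to_res - Return the resource embedded in a
 * struct vmw_user_shader.
 *
 * @base: Pointer to the user shader's TTM base object.
 */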
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}
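
/**
 * vmw_user_shader_free - Free a user shader resource.
 *
 * @res: The shader resource.
 *
 * Frees the user shader structure and releases its memory accounting.
 */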
static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}
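
/**
 * vmw_shader_free - Free a shader resource that has no user-space handle.
 *
 * @res: The shader resource.
 *
 * Frees the shader structure and releases its memory accounting.
 */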
static void vmw_shader_free(struct vmw_resource *res)
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(shader);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
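
/**
 * vmw_shader_destroy_ioctl - Ioctl implementation destroying a user-space
 * shader handle.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_shader_arg.
 * @file_priv: Identifies the calling file.
 *
 * Drops the TTM base object reference identified by the handle, which in
 * turn releases the shader resource.
 */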
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}
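
/**
 * vmw_user_shader_alloc - Allocate a user shader with a user-space handle.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Buffer object holding the shader byte code. May be NULL.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: The SVGA3D shader type.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @tfile: The TTM object file through which the handle is registered.
 * @handle: If non-NULL, used to return the user-space handle of the shader.
 */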
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
				 struct vmw_dma_buffer *buffer,
				 size_t shader_size,
				 size_t offset,
				 SVGA3dShaderType shader_type,
				 uint8_t num_input_sig,
				 uint8_t num_output_sig,
				 struct ttm_object_file *tfile,
				 u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number of shaders anyway.
	 */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, num_input_sig,
				 num_output_sig, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out:
	return ret;
}
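
/**
 * vmw_shader_alloc - Allocate a shader resource without a user-space handle.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Buffer object holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: The SVGA3D shader type.
 *
 * Returns a refcounted pointer to the shader resource on success, an error
 * pointer on failure. Used for compat shaders managed through the command
 * buffer resource manager.
 */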
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
					     struct vmw_dma_buffer *buffer,
					     size_t shader_size,
					     size_t offset,
					     SVGA3dShaderType shader_type)
{
	struct vmw_shader *shader;
	struct vmw_resource *res;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by maximum number of shaders anyway.
	 */
	if (unlikely(vmw_shader_size == 0))
		vmw_shader_size =
			ttm_round_pot(sizeof(struct vmw_shader)) + 128;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out_err;
	}

	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
	if (unlikely(shader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_shader_size);
		ret = -ENOMEM;
		goto out_err;
	}

	res = &shader->res;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, 0, 0, buffer,
				 vmw_shader_free);

out_err:
	return ret ? ERR_PTR(ret) : res;
}
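
/**
 * vmw_shader_define - Create a user-space shader from an optional backing
 * buffer.
 *
 * @dev: Pointer to the drm device.
 * @file_priv: Identifies the calling file.
 * @shader_type_drm: The drm shader type requested by user-space.
 * @buffer_handle: Handle of the buffer holding the byte code, or
 * SVGA3D_INVALID_ID for no buffer.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the buffer.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @shader_handle: On successful return, the user-space handle of the shader.
 *
 * Looks up the backing buffer, validates the arguments and allocates a
 * guest-backed user shader.
 */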
static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
			     enum drm_vmw_shader_type shader_type_drm,
			     u32 buffer_handle, size_t size, size_t offset,
			     uint8_t num_input_sig, uint8_t num_output_sig,
			     uint32_t *shader_handle)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
					     &buffer, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)size + (u64)offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (shader_type_drm) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
				    shader_type, num_input_sig,
				    num_output_sig, tfile, shader_handle);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);
	return ret;
}

/**
 * vmw_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
	return user_key | (shader_type << 20);
}

/**
 * vmw_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
		      u32 user_key, SVGA3dShaderType shader_type,
		      struct list_head *list)
{
	struct vmw_resource *dummy;

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
				     vmw_shader_key(user_key, shader_type),
				     list, &dummy);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 *
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
			  struct vmw_cmdbuf_res_manager *man,
			  u32 user_key, const void *bytecode,
			  SVGA3dShaderType shader_type,
			  size_t size,
			  struct list_head *list)
{
	struct vmw_dma_buffer *buf;
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;
	struct vmw_resource *res;

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	/* Allocate and pin a DMA buffer */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(buf == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
			      true, vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out;

	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
	if (unlikely(ret != 0))
		goto no_reserve;

	/* Map and copy shader bytecode. */
	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(&buf->base);
		goto no_reserve;
	}

	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	WARN_ON(is_iomem);

	ttm_bo_kunmap(&map);
	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
	WARN_ON(ret != 0);
	ttm_bo_unreserve(&buf->base);

	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto no_reserve;
	}

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
				 vmw_shader_key(user_key, shader_type),
				 res, list);
	vmw_resource_unreference(&res);
no_reserve:
	vmw_dmabuf_unreference(&buf);
out:
	return ret;
}

/**
 * vmw_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader was
 * found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key,
		  SVGA3dShaderType shader_type)
{
	if (!vmw_shader_id_ok(user_key, shader_type))
		return ERR_PTR(-EINVAL);

	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
				     vmw_shader_key(user_key, shader_type));
}
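
/**
 * vmw_shader_define_ioctl - Ioctl implementation creating a user-space
 * shader from the argument's buffer handle.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_shader_create_arg.
 * @file_priv: Identifies the calling file.
 *
 * Legacy (non-DX) shaders carry no signature information, so zero is passed
 * for the input and output signature counts.
 */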
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;

	return vmw_shader_define(dev, file_priv, arg->shader_type,
				 arg->buffer_handle,
				 arg->size, arg->offset,
				 0, 0,
				 &arg->shader_handle);
}