// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                  \
({                                                                      \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({     \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;                              \
	});                                                             \
})
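
/*
 * Typical use in a DX command verifier (illustrative sketch; the verifiers
 * further down in this file follow this pattern):
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */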

#define VMW_DECLARE_CMD_VAR(__var, __type)                              \
	struct {                                                        \
		SVGA3dCmdHeader header;                                 \
		__type body;                                            \
	} __var
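
/*
 * Illustrative use, matching the command verifiers below: declare a pointer
 * to a header + body command struct and recover it from the SVGA3dCmdHeader
 * passed to the verifier:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */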

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}
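
/*
 * Illustrative table entry (the command id and flags here are only an
 * example): the command dispatch table later in this file is built from
 * entries such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *		    false, false, false),
 *
 * mapping an SVGA3D command id to its verifier callback and permission flags.
 */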

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
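
/*
 * Example: the relocation code below records the byte offset of a resource id
 * inside the command buffer as vmw_ptr_diff(sw_context->buf_start, id_loc).
 */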

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
2012-11-21 11:26:55 +01:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command .
2012-11-21 11:26:55 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context used for this command submission .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_end_gb_query ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdEndGBQuery ) ;
2012-11-21 11:26:55 +01:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:26:55 +01:00
ret = vmw_cmd_cid_check ( dev_priv , sw_context , header ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-13 13:20:42 -08:00
ret = vmw_translate_mob_ptr ( dev_priv , sw_context , & cmd - > body . mobid ,
2012-11-21 11:26:55 +01:00
& vmw_bo ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2015-06-26 00:25:37 -07:00
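/*
 * Prepare for a possible switch of the device's current query result
 * buffer to this BO, in case it differs from the one used by earlier
 * queries in this batch.
 */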
ret = vmw_query_bo_switch_prepare ( dev_priv , vmw_bo , sw_context ) ;
2012-11-21 11:26:55 +01:00
return ret ;
}
2012-11-20 12:19:35 +00:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command .
2012-11-20 12:19:35 +00:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context used for this command submission .
* @ header : Pointer to the command header in the command stream .
*/
2010-02-21 14:54:55 +00:00
static int vmw_cmd_end_query ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdEndQuery ) ;
2010-02-21 14:54:55 +00:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:26:55 +01:00
if ( dev_priv - > has_mob ) {
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( gb_cmd , SVGA3dCmdEndGBQuery ) ;
2012-11-21 11:26:55 +01:00
BUG_ON ( sizeof ( gb_cmd ) ! = sizeof ( * cmd ) ) ;
gb_cmd . header . id = SVGA_3D_CMD_END_GB_QUERY ;
gb_cmd . header . size = cmd - > header . size ;
2019-02-08 15:50:40 -08:00
gb_cmd . body . cid = cmd - > body . cid ;
gb_cmd . body . type = cmd - > body . type ;
gb_cmd . body . mobid = cmd - > body . guestResult . gmrId ;
gb_cmd . body . offset = cmd - > body . guestResult . offset ;
2012-11-21 11:26:55 +01:00
memcpy ( cmd , & gb_cmd , sizeof ( * cmd ) ) ;
return vmw_cmd_end_gb_query ( dev_priv , sw_context , header ) ;
}
2010-02-21 14:54:55 +00:00
ret = vmw_cmd_cid_check ( dev_priv , sw_context , header ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
ret = vmw_translate_guest_ptr ( dev_priv , sw_context ,
2019-02-13 13:20:42 -08:00
& cmd - > body . guestResult , & vmw_bo ) ;
2010-02-21 14:54:55 +00:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2015-06-26 00:25:37 -07:00
ret = vmw_query_bo_switch_prepare ( dev_priv , vmw_bo , sw_context ) ;
2011-10-04 20:13:30 +02:00
return ret ;
2010-02-21 14:54:55 +00:00
}
2009-12-10 00:19:58 +00:00
2012-11-21 11:26:55 +01:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command .
2012-11-21 11:26:55 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context used for this command submission .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_wait_gb_query ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdWaitForGBQuery ) ;
2012-11-21 11:26:55 +01:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:26:55 +01:00
ret = vmw_cmd_cid_check ( dev_priv , sw_context , header ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-13 13:20:42 -08:00
ret = vmw_translate_mob_ptr ( dev_priv , sw_context , & cmd - > body . mobid ,
2012-11-21 11:26:55 +01:00
& vmw_bo ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
return 0 ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command .
2012-11-20 12:19:35 +00:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context used for this command submission .
* @ header : Pointer to the command header in the command stream .
*/
2010-02-21 14:54:55 +00:00
static int vmw_cmd_wait_query ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdWaitForQuery ) ;
2010-02-21 14:54:55 +00:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:26:55 +01:00
if ( dev_priv - > has_mob ) {
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( gb_cmd , SVGA3dCmdWaitForGBQuery ) ;
2012-11-21 11:26:55 +01:00
BUG_ON ( sizeof ( gb_cmd ) ! = sizeof ( * cmd ) ) ;
gb_cmd . header . id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY ;
gb_cmd . header . size = cmd - > header . size ;
2019-02-08 15:50:40 -08:00
gb_cmd . body . cid = cmd - > body . cid ;
gb_cmd . body . type = cmd - > body . type ;
gb_cmd . body . mobid = cmd - > body . guestResult . gmrId ;
gb_cmd . body . offset = cmd - > body . guestResult . offset ;
2012-11-21 11:26:55 +01:00
memcpy ( cmd , & gb_cmd , sizeof ( * cmd ) ) ;
return vmw_cmd_wait_gb_query ( dev_priv , sw_context , header ) ;
}
2010-02-21 14:54:55 +00:00
ret = vmw_cmd_cid_check ( dev_priv , sw_context , header ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
ret = vmw_translate_guest_ptr ( dev_priv , sw_context ,
2019-02-13 13:20:42 -08:00
& cmd - > body . guestResult , & vmw_bo ) ;
2010-02-21 14:54:55 +00:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
return 0 ;
}
static int vmw_cmd_dma ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo = NULL ;
2010-02-21 14:54:55 +00:00
struct vmw_surface * srf = NULL ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdSurfaceDMA ) ;
2010-02-21 14:54:55 +00:00
int ret ;
2014-04-15 18:25:48 +02:00
SVGA3dCmdSurfaceDMASuffix * suffix ;
uint32_t bo_size ;
2019-02-20 08:21:26 +01:00
bool dirty ;
2010-02-21 14:54:55 +00:00
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
suffix = ( SVGA3dCmdSurfaceDMASuffix * ) ( ( unsigned long ) & cmd - > body +
2014-04-15 18:25:48 +02:00
header - > size - sizeof ( * suffix ) ) ;
/* Make sure the device and the verifier stay in sync. */
if ( unlikely ( suffix - > suffixSize ! = sizeof ( * suffix ) ) ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid DMA suffix size. \n " ) ;
2014-04-15 18:25:48 +02:00
return - EINVAL ;
}
2010-02-21 14:54:55 +00:00
ret = vmw_translate_guest_ptr ( dev_priv , sw_context ,
2019-02-13 13:20:42 -08:00
& cmd - > body . guest . ptr , & vmw_bo ) ;
2010-02-21 14:54:55 +00:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2014-04-15 18:25:48 +02:00
/* Make sure DMA doesn't cross BO boundaries. */
2020-12-09 15:07:50 +01:00
bo_size = vmw_bo - > base . base . size ;
2019-02-08 15:50:40 -08:00
if ( unlikely ( cmd - > body . guest . ptr . offset > bo_size ) ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid DMA offset. \n " ) ;
2014-04-15 18:25:48 +02:00
return - EINVAL ;
}
2019-02-08 15:50:40 -08:00
bo_size - = cmd - > body . guest . ptr . offset ;
2014-04-15 18:25:48 +02:00
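/* Clamp the transfer so the DMA cannot extend past the end of the BO. */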
if ( unlikely ( suffix - > maximumOffset > bo_size ) )
suffix - > maximumOffset = bo_size ;
2019-02-08 15:50:40 -08:00
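/*
 * Only DMA transfers that write into the surface (SVGA3D_WRITE_HOST_VRAM)
 * need to mark the surface resource dirty.
 */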
dirty = ( cmd - > body . transfer = = SVGA3D_WRITE_HOST_VRAM ) ?
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET : 0 ;
2012-11-20 12:19:35 +00:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
dirty , user_surface_converter ,
2019-02-08 15:50:40 -08:00
& cmd - > body . host . sid , NULL ) ;
2011-10-04 20:13:33 +02:00
if ( unlikely ( ret ! = 0 ) ) {
2012-11-20 12:19:35 +00:00
if ( unlikely ( ret ! = - ERESTARTSYS ) )
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " could not find surface for DMA. \n " ) ;
2018-09-26 16:27:54 +02:00
return ret ;
2011-10-04 20:13:33 +02:00
}
2012-11-20 12:19:35 +00:00
srf = vmw_res_to_srf ( sw_context - > res_cache [ vmw_res_surface ] . res ) ;
2011-10-04 20:13:31 +02:00
2019-02-13 13:20:42 -08:00
vmw_kms_cursor_snoop ( srf , sw_context - > fp - > tfile , & vmw_bo - > base , header ) ;
2009-12-10 00:19:58 +00:00
2018-09-26 16:27:54 +02:00
return 0 ;
2009-12-10 00:19:58 +00:00
}
2009-12-22 16:53:41 +01:00
static int vmw_cmd_draw ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDrawPrimitives ) ;
2009-12-22 16:53:41 +01:00
SVGA3dVertexDecl * decl = ( SVGA3dVertexDecl * ) (
( unsigned long ) header + sizeof ( * cmd ) ) ;
SVGA3dPrimitiveRange * range ;
uint32_t i ;
uint32_t maxnum ;
int ret ;
ret = vmw_cmd_cid_check ( dev_priv , sw_context , header ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2009-12-22 16:53:41 +01:00
maxnum = ( header - > size - sizeof ( cmd - > body ) ) / sizeof ( * decl ) ;
if ( unlikely ( cmd - > body . numVertexDecls > maxnum ) ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal number of vertex declarations. \n " ) ;
2009-12-22 16:53:41 +01:00
return - EINVAL ;
}
for ( i = 0 ; i < cmd - > body . numVertexDecls ; + + i , + + decl ) {
2012-11-20 12:19:35 +00:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE ,
2012-11-20 12:19:35 +00:00
user_surface_converter ,
& decl - > array . surfaceId , NULL ) ;
2009-12-22 16:53:41 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
}
maxnum = ( header - > size - sizeof ( cmd - > body ) -
cmd - > body . numVertexDecls * sizeof ( * decl ) ) / sizeof ( * range ) ;
if ( unlikely ( cmd - > body . numRanges > maxnum ) ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal number of index ranges. \n " ) ;
2009-12-22 16:53:41 +01:00
return - EINVAL ;
}
range = ( SVGA3dPrimitiveRange * ) decl ;
for ( i = 0 ; i < cmd - > body . numRanges ; + + i , + + range ) {
2012-11-20 12:19:35 +00:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE ,
2012-11-20 12:19:35 +00:00
user_surface_converter ,
& range - > indexArray . surfaceId , NULL ) ;
2009-12-22 16:53:41 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
}
return 0 ;
}
static int vmw_cmd_tex_state ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdSetTextureState ) ;
2009-12-22 16:53:41 +01:00
SVGA3dTextureState * last_state = ( SVGA3dTextureState * )
( ( unsigned long ) header + header - > size + sizeof ( header ) ) ;
SVGA3dTextureState * cur_state = ( SVGA3dTextureState * )
2019-02-08 15:50:40 -08:00
( ( unsigned long ) header + sizeof ( * cmd ) ) ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * ctx ;
struct vmw_resource * res ;
2009-12-22 16:53:41 +01:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2013-10-08 02:27:17 -07:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_context ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET , user_context_converter ,
2019-02-08 15:50:40 -08:00
& cmd - > body . cid , & ctx ) ;
2009-12-22 16:53:41 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
for ( ; cur_state < last_state ; + + cur_state ) {
if ( likely ( cur_state - > name ! = SVGA3D_TS_BIND_TEXTURE ) )
continue ;
2015-08-10 10:39:35 -07:00
if ( cur_state - > stage > = SVGA3D_NUM_TEXTURE_UNITS ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal texture/sampler unit %u. \n " ,
( unsigned int ) cur_state - > stage ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
2012-11-20 12:19:35 +00:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE ,
2012-11-20 12:19:35 +00:00
user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cur_state - > value , & res ) ;
2009-12-22 16:53:41 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2013-10-08 02:27:17 -07:00
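/*
 * On guest-backed devices, also track the texture binding in the context's
 * staged binding state so the binding can be scrubbed if the surface is
 * later evicted or destroyed.
 */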
if ( dev_priv - > has_mob ) {
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_tex binding ;
2018-09-26 15:28:55 +02:00
struct vmw_ctx_validation_info * node ;
2015-08-10 10:39:35 -07:00
2018-09-26 15:28:55 +02:00
node = vmw_execbuf_info_from_res ( sw_context , ctx ) ;
if ( ! node )
return - EINVAL ;
binding . bi . ctx = ctx ;
binding . bi . res = res ;
2015-08-10 10:39:35 -07:00
binding . bi . bt = vmw_ctx_binding_tex ;
binding . texture_stage = cur_state - > stage ;
2018-09-26 15:28:55 +02:00
vmw_binding_add ( node - > staged , & binding . bi , 0 ,
binding . texture_stage ) ;
2013-10-08 02:27:17 -07:00
}
2009-12-22 16:53:41 +01:00
}
return 0 ;
}
2011-10-04 20:13:19 +02:00
static int vmw_cmd_check_define_gmrfb ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
void * buf )
{
2018-06-19 15:02:16 +02:00
struct vmw_buffer_object * vmw_bo ;
2011-10-04 20:13:19 +02:00
struct {
uint32_t header ;
SVGAFifoCmdDefineGMRFB body ;
} * cmd = buf ;
2019-02-13 13:20:42 -08:00
return vmw_translate_guest_ptr ( dev_priv , sw_context , & cmd - > body . ptr ,
2018-09-26 16:27:54 +02:00
& vmw_bo ) ;
2011-10-04 20:13:19 +02:00
}
2015-08-10 10:39:35 -07:00
/**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
2021-01-15 18:12:36 +00:00
* @ res : Pointer to the resource .
2015-08-10 10:39:35 -07:00
* @ buf_id : Pointer to the user - space backup buffer handle in the command
* stream .
* @ backup_offset : Offset of backup into MOB .
*
2019-02-13 13:20:42 -08:00
* This function prepares for registering a switch of backup buffers in the
* resource metadata just prior to unreserving. It looks up the validation
* info for @res, translates the user-space backup buffer handle, and records
* the new backup buffer and offset.
2015-08-10 10:39:35 -07:00
*/
static int vmw_cmd_res_switch_backup ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
2019-02-13 13:20:42 -08:00
struct vmw_resource * res , uint32_t * buf_id ,
2015-08-10 10:39:35 -07:00
unsigned long backup_offset )
{
2018-09-26 15:28:55 +02:00
struct vmw_buffer_object * vbo ;
void * info ;
2015-08-10 10:39:35 -07:00
int ret ;
2018-09-26 15:28:55 +02:00
info = vmw_execbuf_info_from_res ( sw_context , res ) ;
if ( ! info )
return - EINVAL ;
ret = vmw_translate_mob_ptr ( dev_priv , sw_context , buf_id , & vbo ) ;
2015-08-10 10:39:35 -07:00
if ( ret )
return ret ;
2018-09-26 15:28:55 +02:00
vmw_validation_res_switch_backup ( sw_context - > ctx , info , vbo ,
backup_offset ) ;
2015-08-10 10:39:35 -07:00
return 0 ;
}
2012-11-21 11:45:13 +01:00
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ res_type : The resource type .
* @ converter : Information about user - space binding for this resource type .
* @ res_id : Pointer to the user - space resource handle in the command stream .
* @ buf_id : Pointer to the user - space backup buffer handle in the command
* stream .
* @ backup_offset : Offset of backup into MOB .
*
2019-02-13 13:20:42 -08:00
* This function prepares for registering a switch of backup buffers in the
* resource metadata just prior to unreserving . It ' s basically a wrapper around
* vmw_cmd_res_switch_backup with a different interface .
2012-11-21 11:45:13 +01:00
*/
static int vmw_cmd_switch_backup ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
enum vmw_res_type res_type ,
const struct vmw_user_resource_conv
2019-02-13 13:20:42 -08:00
* converter , uint32_t * res_id , uint32_t * buf_id ,
2012-11-21 11:45:13 +01:00
unsigned long backup_offset )
{
2018-09-26 15:28:55 +02:00
struct vmw_resource * res ;
2015-08-10 10:39:35 -07:00
int ret ;
2012-11-21 11:45:13 +01:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , res_type ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , converter , res_id , & res ) ;
2015-08-10 10:39:35 -07:00
if ( ret )
2012-11-21 11:45:13 +01:00
return ret ;
2019-02-13 13:20:42 -08:00
return vmw_cmd_res_switch_backup ( dev_priv , sw_context , res , buf_id ,
backup_offset ) ;
2012-11-21 11:45:13 +01:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
2012-11-21 11:45:13 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_bind_gb_surface ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdBindGBSurface ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_switch_backup ( dev_priv , sw_context , vmw_res_surface ,
2019-02-13 13:20:42 -08:00
user_surface_converter , & cmd - > body . sid ,
& cmd - > body . mobid , 0 ) ;
2012-11-21 11:45:13 +01:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
2012-11-21 11:45:13 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_update_gb_image ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdUpdateGBImage ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . image . sid , NULL ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
2012-11-21 11:45:13 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_update_gb_surface ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdUpdateGBSurface ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_CLEAR , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . sid , NULL ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
2012-11-21 11:45:13 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_readback_gb_image ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdReadbackGBImage ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . image . sid , NULL ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
2012-11-21 11:45:13 +01:00
* command
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_readback_gb_surface ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdReadbackGBSurface ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_CLEAR , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . sid , NULL ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2012-11-21 11:45:13 +01:00
* command
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_invalidate_gb_image ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdInvalidateGBImage ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . image . sid , NULL ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
* command
2012-11-21 11:45:13 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_invalidate_gb_surface ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdInvalidateGBSurface ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 11:45:13 +01:00
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_CLEAR , user_surface_converter ,
2012-11-21 11:45:13 +01:00
& cmd - > body . sid , NULL ) ;
}
2014-01-31 10:12:10 +01:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
2014-01-31 10:12:10 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_shader_define ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDefineShader ) ;
2014-01-31 10:12:10 +01:00
int ret ;
size_t size ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * ctx ;
2014-01-31 10:12:10 +01:00
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2014-01-31 10:12:10 +01:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_context ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET , user_context_converter ,
& cmd - > body . cid , & ctx ) ;
2014-01-31 10:12:10 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
if ( unlikely ( ! dev_priv - > has_mob ) )
return 0 ;
size = cmd - > header . size - sizeof ( cmd - > body ) ;
2019-02-13 13:20:42 -08:00
ret = vmw_compat_shader_add ( dev_priv , vmw_context_res_man ( ctx ) ,
cmd - > body . shid , cmd + 1 , cmd - > body . type ,
size , & sw_context - > staged_cmd_res ) ;
2014-01-31 10:12:10 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-13 13:20:42 -08:00
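/*
 * The shader is now managed by the kernel's per-context shader code, so
 * replace this command with a NOP in the device command stream; the device
 * never sees the legacy define.
 */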
return vmw_resource_relocation_add ( sw_context , NULL ,
2016-10-10 10:44:00 -07:00
vmw_ptr_diff ( sw_context - > buf_start ,
2016-10-10 11:06:45 -07:00
& cmd - > header . id ) ,
vmw_res_rel_nop ) ;
2014-01-31 10:12:10 +01:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
2014-01-31 10:12:10 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_shader_destroy ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDestroyShader ) ;
2014-01-31 10:12:10 +01:00
int ret ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * ctx ;
2014-01-31 10:12:10 +01:00
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2014-01-31 10:12:10 +01:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_context ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET , user_context_converter ,
& cmd - > body . cid , & ctx ) ;
2014-01-31 10:12:10 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
if ( unlikely ( ! dev_priv - > has_mob ) )
return 0 ;
2019-02-13 13:20:42 -08:00
ret = vmw_shader_remove ( vmw_context_res_man ( ctx ) , cmd - > body . shid ,
cmd - > body . type , & sw_context - > staged_cmd_res ) ;
2014-01-31 10:12:10 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-13 13:20:42 -08:00
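/*
 * The shader removal is handled on the kernel side, so turn this command
 * into a NOP in the device command stream.
 */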
return vmw_resource_relocation_add ( sw_context , NULL ,
2016-10-10 10:44:00 -07:00
vmw_ptr_diff ( sw_context - > buf_start ,
2016-10-10 11:06:45 -07:00
& cmd - > header . id ) ,
vmw_res_rel_nop ) ;
2014-01-31 10:12:10 +01:00
}
2012-11-20 12:19:35 +00:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
2012-11-20 12:19:35 +00:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_set_shader ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdSetShader ) ;
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_shader binding ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * ctx , * res = NULL ;
struct vmw_ctx_validation_info * ctx_info ;
2012-11-20 12:19:35 +00:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-20 12:19:35 +00:00
2015-08-10 10:39:35 -07:00
if ( cmd - > body . type > = SVGA3D_SHADERTYPE_PREDX_MAX ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal shader type %u. \n " ,
( unsigned int ) cmd - > body . type ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
2013-10-08 02:27:17 -07:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_context ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET , user_context_converter ,
& cmd - > body . cid , & ctx ) ;
2012-11-20 12:19:35 +00:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2014-06-09 12:39:22 +02:00
if ( ! dev_priv - > has_mob )
return 0 ;
if ( cmd - > body . shid ! = SVGA3D_INVALID_ID ) {
2019-04-04 13:25:43 +00:00
/*
 * This is the compat shader path - per-device guest-backed shaders,
 * but user-space thinks they are per-context host-backed shaders.
 */
2018-09-26 15:28:55 +02:00
res = vmw_shader_lookup ( vmw_context_res_man ( ctx ) ,
2019-02-13 13:20:42 -08:00
cmd - > body . shid , cmd - > body . type ) ;
2014-06-09 12:39:22 +02:00
if ( ! IS_ERR ( res ) ) {
2019-02-20 08:21:26 +01:00
ret = vmw_execbuf_res_noctx_val_add ( sw_context , res ,
VMW_RES_DIRTY_NONE ) ;
2014-06-09 12:39:22 +02:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-04-04 13:25:43 +00:00
ret = vmw_resource_relocation_add
( sw_context , res ,
vmw_ptr_diff ( sw_context - > buf_start ,
& cmd - > body . shid ) ,
vmw_res_rel_normal ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2014-06-09 12:39:22 +02:00
}
}
2018-09-26 15:28:55 +02:00
if ( IS_ERR_OR_NULL ( res ) ) {
2019-02-13 13:20:42 -08:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_shader ,
VMW_RES_DIRTY_NONE ,
user_shader_converter , & cmd - > body . shid ,
& res ) ;
2013-10-08 02:27:17 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
}
2012-11-21 12:10:26 +01:00
2018-09-26 15:28:55 +02:00
ctx_info = vmw_execbuf_info_from_res ( sw_context , ctx ) ;
if ( ! ctx_info )
return - EINVAL ;
binding . bi . ctx = ctx ;
binding . bi . res = res ;
2015-08-10 10:39:35 -07:00
binding . bi . bt = vmw_ctx_binding_shader ;
binding . shader_slot = cmd - > body . type - SVGA3D_SHADERTYPE_MIN ;
2019-02-13 13:20:42 -08:00
vmw_binding_add ( ctx_info - > staged , & binding . bi , binding . shader_slot , 0 ) ;
2015-08-10 10:39:35 -07:00
return 0 ;
2012-11-20 12:19:35 +00:00
}
2014-01-30 11:13:43 +01:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2014-01-30 11:13:43 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_set_shader_const ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdSetShaderConst ) ;
2014-01-30 11:13:43 +01:00
int ret ;
2019-02-08 15:50:40 -08:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2014-01-30 11:13:43 +01:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_context ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET , user_context_converter ,
& cmd - > body . cid , NULL ) ;
2014-01-30 11:13:43 +01:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
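/*
 * On guest-backed devices, redirect the legacy command to its inline
 * guest-backed equivalent by patching the command id in place.
 */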
if ( dev_priv - > has_mob )
header - > id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE ;
return 0 ;
}
2012-11-21 12:10:26 +01:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2012-11-21 12:10:26 +01:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_bind_gb_shader ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdBindGBShader ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2012-11-21 12:10:26 +01:00
return vmw_cmd_switch_backup ( dev_priv , sw_context , vmw_res_shader ,
2019-02-13 13:20:42 -08:00
user_shader_converter , & cmd - > body . shid ,
& cmd - > body . mobid , cmd - > body . offsetInBytes ) ;
2012-11-21 12:10:26 +01:00
}
2015-08-10 10:39:35 -07:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_set_single_constant_buffer - Validate
2015-08-10 10:39:35 -07:00
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command .
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int
vmw_cmd_dx_set_single_constant_buffer ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
2011-10-04 20:13:19 +02:00
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXSetSingleConstantBuffer ) ;
2018-12-13 13:43:20 -08:00
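/*
 * SM5-capable contexts accept the full range of shader types; older DX
 * contexts are limited to the DX10 subset.
 */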
SVGA3dShaderType max_shader_num = has_sm5_context ( dev_priv ) ?
SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10 ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * res = NULL ;
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_cb binding ;
int ret ;
2011-10-04 20:13:19 +02:00
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2011-10-04 20:13:19 +02:00
return - EINVAL ;
2015-08-10 10:39:35 -07:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cmd - > body . sid , & res ) ;
2015-08-10 10:39:35 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2011-10-04 20:13:19 +02:00
2018-09-26 15:28:55 +02:00
binding . bi . ctx = ctx_node - > ctx ;
binding . bi . res = res ;
2015-08-10 10:39:35 -07:00
binding . bi . bt = vmw_ctx_binding_cb ;
binding . shader_slot = cmd - > body . type - SVGA3D_SHADERTYPE_MIN ;
binding . offset = cmd - > body . offsetInBytes ;
binding . size = cmd - > body . sizeInBytes ;
binding . slot = cmd - > body . slot ;
2018-12-13 13:43:20 -08:00
if ( binding . shader_slot > = max_shader_num | |
2015-08-10 10:39:35 -07:00
binding . slot > = SVGA3D_DX_MAX_CONSTBUFFERS ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal const buffer shader %u slot %u. \n " ,
( unsigned int ) cmd - > body . type ,
( unsigned int ) binding . slot ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
2011-10-04 20:13:19 +02:00
}
2019-02-13 13:20:42 -08:00
vmw_binding_add ( ctx_node - > staged , & binding . bi , binding . shader_slot ,
binding . slot ) ;
2011-10-04 20:13:19 +02:00
return 0 ;
}
2009-12-10 00:19:58 +00:00
2015-08-10 10:39:35 -07:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
* command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_shader_res ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXSetShaderResources ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2018-12-13 13:43:20 -08:00
SVGA3dShaderType max_allowed = has_sm5_context ( dev_priv ) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX ;
2015-08-10 10:39:35 -07:00
u32 num_sr_view = ( cmd - > header . size - sizeof ( cmd - > body ) ) /
sizeof ( SVGA3dShaderResourceViewId ) ;
if ( ( u64 ) cmd - > body . startView + ( u64 ) num_sr_view >
( u64 ) SVGA3D_DX_MAX_SRVIEWS | |
2018-12-13 13:43:20 -08:00
cmd - > body . type > = max_allowed ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid shader binding. \n " ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
return vmw_view_bindings_add ( sw_context , vmw_view_sr ,
vmw_ctx_binding_sr ,
cmd - > body . type - SVGA3D_SHADERTYPE_MIN ,
( void * ) & cmd [ 1 ] , num_sr_view ,
cmd - > body . startView ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_shader ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXSetShader ) ;
2018-12-13 13:43:20 -08:00
SVGA3dShaderType max_allowed = has_sm5_context ( dev_priv ) ?
SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX ;
2015-08-10 10:39:35 -07:00
struct vmw_resource * res = NULL ;
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_shader binding ;
int ret = 0 ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2018-12-13 13:43:20 -08:00
if ( cmd - > body . type > = max_allowed | |
2019-05-20 21:57:34 +12:00
cmd - > body . type < SVGA3D_SHADERTYPE_MIN ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Illegal shader type %u. \n " ,
( unsigned int ) cmd - > body . type ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
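/*
 * A valid shaderId must refer to a DX shader previously defined through
 * sw_context->man; look it up and add it to the validation list.
 */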
if ( cmd - > body . shaderId ! = SVGA3D_INVALID_ID ) {
res = vmw_shader_lookup ( sw_context - > man , cmd - > body . shaderId , 0 ) ;
if ( IS_ERR ( res ) ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Could not find shader for binding. \n " ) ;
2015-08-10 10:39:35 -07:00
return PTR_ERR ( res ) ;
}
2019-02-20 08:21:26 +01:00
ret = vmw_execbuf_res_noctx_val_add ( sw_context , res ,
VMW_RES_DIRTY_NONE ) ;
2015-08-10 10:39:35 -07:00
if ( ret )
2018-09-26 16:28:45 +02:00
return ret ;
2015-08-10 10:39:35 -07:00
}
2018-09-26 15:28:55 +02:00
binding . bi . ctx = ctx_node - > ctx ;
2015-08-10 10:39:35 -07:00
binding . bi . res = res ;
binding . bi . bt = vmw_ctx_binding_dx_shader ;
binding . shader_slot = cmd - > body . type - SVGA3D_SHADERTYPE_MIN ;
2019-02-13 13:20:42 -08:00
vmw_binding_add ( ctx_node - > staged , & binding . bi , binding . shader_slot , 0 ) ;
2015-08-10 10:39:35 -07:00
2018-09-26 16:28:45 +02:00
return 0 ;
2015-08-10 10:39:35 -07:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
* command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_vertex_buffers ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_vb binding ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * res ;
2015-08-10 10:39:35 -07:00
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXSetVertexBuffers body ;
SVGA3dVertexBuffer buf [ ] ;
} * cmd ;
int i , ret , num ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
num = ( cmd - > header . size - sizeof ( cmd - > body ) ) /
sizeof ( SVGA3dVertexBuffer ) ;
if ( ( u64 ) num + ( u64 ) cmd - > body . startBuffer >
( u64 ) SVGA3D_DX_MAX_VERTEXBUFFERS ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid number of vertex buffers. \n " ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
for ( i = 0 ; i < num ; i + + ) {
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE ,
2015-08-10 10:39:35 -07:00
user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cmd - > buf [ i ] . sid , & res ) ;
2015-08-10 10:39:35 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2018-09-26 15:28:55 +02:00
binding . bi . ctx = ctx_node - > ctx ;
2015-08-10 10:39:35 -07:00
binding . bi . bt = vmw_ctx_binding_vb ;
2018-09-26 15:28:55 +02:00
binding . bi . res = res ;
2015-08-10 10:39:35 -07:00
binding . offset = cmd - > buf [ i ] . offset ;
binding . stride = cmd - > buf [ i ] . stride ;
binding . slot = i + cmd - > body . startBuffer ;
2019-02-13 13:20:42 -08:00
vmw_binding_add ( ctx_node - > staged , & binding . bi , 0 , binding . slot ) ;
2015-08-10 10:39:35 -07:00
}
return 0 ;
}
/**
2021-05-05 15:10:07 -04:00
* vmw_cmd_dx_set_index_buffer - Validate
2017-07-17 07:36:10 -07:00
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command .
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_index_buffer ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_ctx_bindinfo_ib binding ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * res ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXSetIndexBuffer ) ;
2015-08-10 10:39:35 -07:00
int ret ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cmd - > body . sid , & res ) ;
2015-08-10 10:39:35 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2018-09-26 15:28:55 +02:00
binding . bi . ctx = ctx_node - > ctx ;
binding . bi . res = res ;
2015-08-10 10:39:35 -07:00
binding . bi . bt = vmw_ctx_binding_ib ;
binding . offset = cmd - > body . offset ;
binding . format = cmd - > body . format ;
2018-09-26 15:28:55 +02:00
vmw_binding_add ( ctx_node - > staged , & binding . bi , 0 , 0 ) ;
2015-08-10 10:39:35 -07:00
return 0 ;
}
/**
2021-05-05 15:10:07 -04:00
* vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2019-02-13 13:20:42 -08:00
* command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_rendertargets ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXSetRenderTargets ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2015-08-10 10:39:35 -07:00
u32 num_rt_view = ( cmd - > header . size - sizeof ( cmd - > body ) ) /
sizeof ( SVGA3dRenderTargetViewId ) ;
2019-02-08 15:50:40 -08:00
int ret ;
2015-08-10 10:39:35 -07:00
2021-06-15 14:23:33 -04:00
if ( num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid DX Rendertarget binding. \n " ) ;
2015-08-10 10:39:35 -07:00
return - EINVAL ;
}
2019-02-13 13:20:42 -08:00
ret = vmw_view_bindings_add ( sw_context , vmw_view_ds , vmw_ctx_binding_ds ,
0 , & cmd - > body . depthStencilViewId , 1 , 0 ) ;
2015-08-10 10:39:35 -07:00
if ( ret )
return ret ;
return vmw_view_bindings_add ( sw_context , vmw_view_rt ,
2019-02-13 13:20:42 -08:00
vmw_ctx_binding_dx_rt , 0 , ( void * ) & cmd [ 1 ] ,
num_rt_view , 0 ) ;
2015-08-10 10:39:35 -07:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_clear_rendertarget_view - Validate
2015-08-10 10:39:35 -07:00
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_clear_rendertarget_view ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXClearRenderTargetView ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2019-12-08 11:53:28 +01:00
struct vmw_resource * ret ;
2015-08-10 10:39:35 -07:00
2019-12-08 11:53:28 +01:00
ret = vmw_view_id_val_add ( sw_context , vmw_view_rt ,
cmd - > body . renderTargetViewId ) ;
return PTR_ERR_OR_ZERO ( ret ) ;
2015-08-10 10:39:35 -07:00
}
/**
2021-05-05 15:10:07 -04:00
* vmw_cmd_dx_clear_depthstencil_view - Validate
2015-08-10 10:39:35 -07:00
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_clear_depthstencil_view ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXClearDepthStencilView ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2019-12-08 11:53:28 +01:00
struct vmw_resource * ret ;
ret = vmw_view_id_val_add ( sw_context , vmw_view_ds ,
cmd - > body . depthStencilViewId ) ;
2015-08-10 10:39:35 -07:00
2019-12-08 11:53:28 +01:00
return PTR_ERR_OR_ZERO ( ret ) ;
2015-08-10 10:39:35 -07:00
}
static int vmw_cmd_dx_view_define ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * srf ;
2015-08-10 10:39:35 -07:00
struct vmw_resource * res ;
enum vmw_view_type view_type ;
int ret ;
/*
2019-02-13 13:20:42 -08:00
* This is based on the fact that all affected define commands have the
* same initial command body layout .
2015-08-10 10:39:35 -07:00
*/
struct {
SVGA3dCmdHeader header ;
uint32 defined_id ;
uint32 sid ;
} * cmd ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
view_type = vmw_view_cmd_to_type ( header - > id ) ;
2018-01-10 12:40:04 +03:00
if ( view_type = = vmw_view_max )
return - EINVAL ;
2019-02-13 13:20:42 -08:00
2015-08-10 10:39:35 -07:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2019-05-11 18:01:37 +12:00
if ( unlikely ( cmd - > sid = = SVGA3D_INVALID_ID ) ) {
VMW_DEBUG_USER ( " Invalid surface id. \n " ) ;
return - EINVAL ;
}
2015-08-10 10:39:35 -07:00
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cmd - > sid , & srf ) ;
2015-08-10 10:39:35 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2018-09-26 15:28:55 +02:00
res = vmw_context_cotable ( ctx_node - > ctx , vmw_view_cotables [ view_type ] ) ;
2015-08-10 10:39:35 -07:00
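/*
 * Notify the view cotable so it can be resized to hold the new view id
 * before the define command reaches the device.
 */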
ret = vmw_cotable_notify ( res , cmd - > defined_id ) ;
if ( unlikely ( ret ! = 0 ) )
return ret ;
2019-02-13 13:20:42 -08:00
return vmw_view_add ( sw_context - > man , ctx_node - > ctx , srf , view_type ,
cmd - > defined_id , header ,
2015-08-10 10:39:35 -07:00
header - > size + sizeof ( * header ) ,
& sw_context - > staged_cmd_res ) ;
}
2015-08-10 10:45:11 -07:00
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command .
2015-08-10 10:45:11 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_set_so_targets ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2018-12-18 10:13:13 -08:00
struct vmw_ctx_bindinfo_so_target binding ;
2018-09-26 15:28:55 +02:00
struct vmw_resource * res ;
2015-08-10 10:45:11 -07:00
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXSetSOTargets body ;
SVGA3dSoTarget targets [ ] ;
} * cmd ;
int i , ret , num ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:45:11 -07:00
return - EINVAL ;
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
2019-02-13 13:20:42 -08:00
num = ( cmd - > header . size - sizeof ( cmd - > body ) ) / sizeof ( SVGA3dSoTarget ) ;
2015-08-10 10:45:11 -07:00
if ( num > SVGA3D_DX_MAX_SOTARGETS ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Invalid DX SO binding. \n " ) ;
2015-08-10 10:45:11 -07:00
return - EINVAL ;
}
for ( i = 0 ; i < num ; i + + ) {
ret = vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_SET ,
2015-08-10 10:45:11 -07:00
user_surface_converter ,
2018-09-26 15:28:55 +02:00
& cmd - > targets [ i ] . sid , & res ) ;
2015-08-10 10:45:11 -07:00
if ( unlikely ( ret ! = 0 ) )
return ret ;
2018-09-26 15:28:55 +02:00
binding . bi . ctx = ctx_node - > ctx ;
binding . bi . res = res ;
2020-12-11 16:57:51 +08:00
binding . bi . bt = vmw_ctx_binding_so_target ;
2015-08-10 10:45:11 -07:00
binding . offset = cmd - > targets [ i ] . offset ;
binding . size = cmd - > targets [ i ] . sizeInBytes ;
binding . slot = i ;
2019-02-13 13:20:42 -08:00
vmw_binding_add ( ctx_node - > staged , & binding . bi , 0 , binding . slot ) ;
2015-08-10 10:45:11 -07:00
}
return 0 ;
}
2015-08-10 10:39:35 -07:00
static int vmw_cmd_dx_so_define ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_resource * res ;
/*
* This is based on the fact that all affected define commands have
* the same initial command body layout .
*/
struct {
SVGA3dCmdHeader header ;
uint32 defined_id ;
} * cmd ;
enum vmw_so_type so_type ;
int ret ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
so_type = vmw_so_cmd_to_type ( header - > id ) ;
2018-09-26 15:28:55 +02:00
res = vmw_context_cotable ( ctx_node - > ctx , vmw_so_cotables [ so_type ] ) ;
2021-06-09 13:23:02 -04:00
if ( IS_ERR ( res ) )
return PTR_ERR ( res ) ;
2015-08-10 10:39:35 -07:00
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
ret = vmw_cotable_notify ( res , cmd - > defined_id ) ;
return ret ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
* command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_check_subresource ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct {
SVGA3dCmdHeader header ;
union {
SVGA3dCmdDXReadbackSubResource r_body ;
SVGA3dCmdDXInvalidateSubResource i_body ;
SVGA3dCmdDXUpdateSubResource u_body ;
SVGA3dSurfaceId sid ;
} ;
} * cmd ;
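/*
 * All three subresource command bodies start with the surface id, so they
 * can be accessed through the sid union member; the BUILD_BUG_ONs below
 * verify that layout assumption.
 */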
BUILD_BUG_ON ( offsetof ( typeof ( * cmd ) , r_body . sid ) ! =
offsetof ( typeof ( * cmd ) , sid ) ) ;
BUILD_BUG_ON ( offsetof ( typeof ( * cmd ) , i_body . sid ) ! =
offsetof ( typeof ( * cmd ) , sid ) ) ;
BUILD_BUG_ON ( offsetof ( typeof ( * cmd ) , u_body . sid ) ! =
offsetof ( typeof ( * cmd ) , sid ) ) ;
cmd = container_of ( header , typeof ( * cmd ) , header ) ;
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
2019-02-20 08:21:26 +01:00
VMW_RES_DIRTY_NONE , user_surface_converter ,
2015-08-10 10:39:35 -07:00
& cmd - > sid , NULL ) ;
}
static int vmw_cmd_dx_cid_check ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
return 0 ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
* resource for removal .
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*
2019-02-13 13:20:42 -08:00
* Check that the view exists , and if it was not created using this command
* batch , conditionally make this command a NOP .
2015-08-10 10:39:35 -07:00
*/
static int vmw_cmd_dx_view_remove ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct {
SVGA3dCmdHeader header ;
union vmw_view_destroy body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
enum vmw_view_type view_type = vmw_view_cmd_to_type ( header - > id ) ;
struct vmw_resource * view ;
int ret ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
2019-02-13 13:20:42 -08:00
ret = vmw_view_remove ( sw_context - > man , cmd - > body . view_id , view_type ,
& sw_context - > staged_cmd_res , & view ) ;
2015-08-10 10:39:35 -07:00
if ( ret | | ! view )
return ret ;
/*
2016-10-10 11:06:45 -07:00
* If the view wasn't created during this command batch, it might
* have been removed due to a context swapout, so add a relocation
* to conditionally make this command a NOP to avoid device errors.
2015-08-10 10:39:35 -07:00
*/
2019-02-13 13:20:42 -08:00
return vmw_resource_relocation_add ( sw_context , view ,
2016-10-10 11:06:45 -07:00
vmw_ptr_diff ( sw_context - > buf_start ,
& cmd - > header . id ) ,
vmw_res_rel_cond_nop ) ;
2015-08-10 10:39:35 -07:00
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_define_shader ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2015-08-10 10:39:35 -07:00
struct vmw_resource * res ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXDefineShader ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2015-08-10 10:39:35 -07:00
int ret ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
2018-09-26 15:28:55 +02:00
res = vmw_context_cotable ( ctx_node - > ctx , SVGA_COTABLE_DXSHADER ) ;
2015-08-10 10:39:35 -07:00
ret = vmw_cotable_notify ( res , cmd - > body . shaderId ) ;
if ( ret )
return ret ;
2018-09-26 15:28:55 +02:00
return vmw_dx_shader_add ( sw_context - > man , ctx_node - > ctx ,
2015-08-10 10:39:35 -07:00
cmd - > body . shaderId , cmd - > body . type ,
& sw_context - > staged_cmd_res ) ;
}
/**
2019-02-13 13:20:42 -08:00
* vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2015-08-10 10:39:35 -07:00
*
* @ dev_priv : Pointer to a device private struct .
* @ sw_context : The software context being used for this batch .
* @ header : Pointer to the command header in the command stream .
*/
static int vmw_cmd_dx_destroy_shader ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
2019-02-08 12:53:57 -08:00
struct vmw_ctx_validation_info * ctx_node = VMW_GET_CTX_NODE ( sw_context ) ;
2019-02-08 15:50:40 -08:00
VMW_DECLARE_CMD_VAR ( * cmd , SVGA3dCmdDXDestroyShader ) =
container_of ( header , typeof ( * cmd ) , header ) ;
2015-08-10 10:39:35 -07:00
int ret ;
2019-02-08 12:53:57 -08:00
if ( ! ctx_node )
2015-08-10 10:39:35 -07:00
return - EINVAL ;
ret = vmw_shader_remove ( sw_context - > man , cmd - > body . shaderId , 0 ,
& sw_context - > staged_cmd_res ) ;
return ret ;
}
/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}
/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);

	return 0;
}
/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}
/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}
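/**
 * vmw_cmd_sm5 - Validate commands that only require an SM5-capable device,
 * such as SVGA_3D_CMD_DX_DISPATCH, and need no further checking.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */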
static int vmw_cmd_sm5 ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return 0 ;
}
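/**
 * vmw_cmd_sm5_view_define - Validate SVGA_3D_CMD_DX_DEFINE_UA_VIEW and
 * SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 commands.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */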
static int vmw_cmd_sm5_view_define ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return vmw_cmd_dx_view_define ( dev_priv , sw_context , header ) ;
}
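/**
 * vmw_cmd_sm5_view_remove - Validate SVGA_3D_CMD_DX_DESTROY_UA_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */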
static int vmw_cmd_sm5_view_remove ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return vmw_cmd_dx_view_remove ( dev_priv , sw_context , header ) ;
}
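/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */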
static int vmw_cmd_clear_uav_uint ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXClearUAViewUint body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
struct vmw_resource * ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
ret = vmw_view_id_val_add ( sw_context , vmw_view_ua ,
cmd - > body . uaViewId ) ;
return PTR_ERR_OR_ZERO ( ret ) ;
}
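/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */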
static int vmw_cmd_clear_uav_float ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXClearUAViewFloat body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
struct vmw_resource * ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
ret = vmw_view_id_val_add ( sw_context , vmw_view_ua ,
cmd - > body . uaViewId ) ;
return PTR_ERR_OR_ZERO ( ret ) ;
}
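/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */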
static int vmw_cmd_set_uav ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXSetUAViews body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
u32 num_uav = ( cmd - > header . size - sizeof ( cmd - > body ) ) /
sizeof ( SVGA3dUAViewId ) ;
int ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
if ( num_uav > SVGA3D_MAX_UAVIEWS ) {
VMW_DEBUG_USER ( " Invalid UAV binding. \n " ) ;
return - EINVAL ;
}
ret = vmw_view_bindings_add ( sw_context , vmw_view_ua ,
vmw_ctx_binding_uav , 0 , ( void * ) & cmd [ 1 ] ,
num_uav , 0 ) ;
if ( ret )
return ret ;
vmw_binding_add_uav_index ( sw_context - > dx_ctx_node - > staged , 0 ,
cmd - > body . uavSpliceIndex ) ;
return ret ;
}
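/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */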
static int vmw_cmd_set_cs_uav ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXSetCSUAViews body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
u32 num_uav = ( cmd - > header . size - sizeof ( cmd - > body ) ) /
sizeof ( SVGA3dUAViewId ) ;
int ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
if ( num_uav > SVGA3D_MAX_UAVIEWS ) {
VMW_DEBUG_USER ( " Invalid UAV binding. \n " ) ;
return - EINVAL ;
}
ret = vmw_view_bindings_add ( sw_context , vmw_view_ua ,
vmw_ctx_binding_cs_uav , 0 , ( void * ) & cmd [ 1 ] ,
num_uav , 0 ) ;
if ( ret )
return ret ;
vmw_binding_add_uav_index ( sw_context - > dx_ctx_node - > staged , 1 ,
cmd - > body . startIndex ) ;
return ret ;
}
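/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */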
static int vmw_cmd_dx_define_streamoutput ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_ctx_validation_info * ctx_node = sw_context - > dx_ctx_node ;
struct vmw_resource * res ;
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXDefineStreamOutputWithMob body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
int ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
if ( ! ctx_node ) {
DRM_ERROR ( " DX Context not set. \n " ) ;
return - EINVAL ;
}
res = vmw_context_cotable ( ctx_node - > ctx , SVGA_COTABLE_STREAMOUTPUT ) ;
ret = vmw_cotable_notify ( res , cmd - > body . soid ) ;
if ( ret )
return ret ;
return vmw_dx_streamoutput_add ( sw_context - > man , ctx_node - > ctx ,
cmd - > body . soid ,
& sw_context - > staged_cmd_res ) ;
}
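/**
 * vmw_cmd_dx_destroy_streamoutput - Validate
 * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */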
static int vmw_cmd_dx_destroy_streamoutput ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_ctx_validation_info * ctx_node = sw_context - > dx_ctx_node ;
struct vmw_resource * res ;
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXDestroyStreamOutput body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
if ( ! ctx_node ) {
DRM_ERROR ( " DX Context not set. \n " ) ;
return - EINVAL ;
}
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in that
	 * case.
	 */
if ( ! has_sm5_context ( dev_priv ) )
return 0 ;
	/*
	 * On an SM5-capable device, if the lookup fails, user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
res = vmw_dx_streamoutput_lookup ( vmw_context_res_man ( ctx_node - > ctx ) ,
cmd - > body . soid ) ;
if ( IS_ERR ( res ) )
return 0 ;
return vmw_dx_streamoutput_remove ( sw_context - > man , cmd - > body . soid ,
& sw_context - > staged_cmd_res ) ;
}
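/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */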
static int vmw_cmd_dx_bind_streamoutput ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_ctx_validation_info * ctx_node = sw_context - > dx_ctx_node ;
struct vmw_resource * res ;
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXBindStreamOutput body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
int ret ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
if ( ! ctx_node ) {
DRM_ERROR ( " DX Context not set. \n " ) ;
return - EINVAL ;
}
res = vmw_dx_streamoutput_lookup ( vmw_context_res_man ( ctx_node - > ctx ) ,
cmd - > body . soid ) ;
if ( IS_ERR ( res ) ) {
DRM_ERROR ( " Could not find streamoutput to bind. \n " ) ;
return PTR_ERR ( res ) ;
}
vmw_dx_streamoutput_set_size ( res , cmd - > body . sizeInBytes ) ;
ret = vmw_execbuf_res_noctx_val_add ( sw_context , res ,
VMW_RES_DIRTY_NONE ) ;
if ( ret ) {
DRM_ERROR ( " Error creating resource validation node. \n " ) ;
return ret ;
}
return vmw_cmd_res_switch_backup ( dev_priv , sw_context , res ,
& cmd - > body . mobid ,
cmd - > body . offsetInBytes ) ;
}
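/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */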
static int vmw_cmd_dx_set_streamoutput ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_ctx_validation_info * ctx_node = sw_context - > dx_ctx_node ;
struct vmw_resource * res ;
struct vmw_ctx_bindinfo_so binding ;
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdDXSetStreamOutput body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
int ret ;
if ( ! ctx_node ) {
DRM_ERROR ( " DX Context not set. \n " ) ;
return - EINVAL ;
}
if ( cmd - > body . soid = = SVGA3D_INVALID_ID )
return 0 ;
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in that
	 * case.
	 */
if ( ! has_sm5_context ( dev_priv ) )
return 0 ;
	/*
	 * On an SM5-capable device, if the lookup fails, user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
res = vmw_dx_streamoutput_lookup ( vmw_context_res_man ( ctx_node - > ctx ) ,
cmd - > body . soid ) ;
if ( IS_ERR ( res ) ) {
return 0 ;
}
ret = vmw_execbuf_res_noctx_val_add ( sw_context , res ,
VMW_RES_DIRTY_NONE ) ;
if ( ret ) {
DRM_ERROR ( " Error creating resource validation node. \n " ) ;
return ret ;
}
binding . bi . ctx = ctx_node - > ctx ;
binding . bi . res = res ;
binding . bi . bt = vmw_ctx_binding_so ;
binding . slot = 0 ; /* Only one SO set to context at a time. */
vmw_binding_add ( sw_context - > dx_ctx_node - > staged , & binding . bi , 0 ,
binding . slot ) ;
return ret ;
}
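/**
 * vmw_cmd_indexed_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */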
static int vmw_cmd_indexed_instanced_indirect ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_draw_indexed_instanced_indirect_cmd {
SVGA3dCmdHeader header ;
SVGA3dCmdDXDrawIndexedInstancedIndirect body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
VMW_RES_DIRTY_NONE , user_surface_converter ,
& cmd - > body . argsBufferSid , NULL ) ;
}
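/**
 * vmw_cmd_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */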
static int vmw_cmd_instanced_indirect ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_draw_instanced_indirect_cmd {
SVGA3dCmdHeader header ;
SVGA3dCmdDXDrawInstancedIndirect body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
VMW_RES_DIRTY_NONE , user_surface_converter ,
& cmd - > body . argsBufferSid , NULL ) ;
}
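/**
 * vmw_cmd_dispatch_indirect - Validate SVGA_3D_CMD_DX_DISPATCH_INDIRECT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */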
static int vmw_cmd_dispatch_indirect ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
SVGA3dCmdHeader * header )
{
struct vmw_dispatch_indirect_cmd {
SVGA3dCmdHeader header ;
SVGA3dCmdDXDispatchIndirect body ;
} * cmd = container_of ( header , typeof ( * cmd ) , header ) ;
if ( ! has_sm5_context ( dev_priv ) )
return - EINVAL ;
return vmw_cmd_res_check ( dev_priv , sw_context , vmw_res_surface ,
VMW_RES_DIRTY_NONE , user_surface_converter ,
& cmd - > body . argsBufferSid , NULL ) ;
}
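/**
 * vmw_cmd_check_not_3d - Validate a non-3D (2D/FIFO) command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: remaining batch size. Out: size of the command.
 */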
static int vmw_cmd_check_not_3d ( struct vmw_private * dev_priv ,
struct vmw_sw_context * sw_context ,
void * buf , uint32_t * size )
{
uint32_t size_remaining = * size ;
uint32_t cmd_id ;
cmd_id = ( ( uint32_t * ) buf ) [ 0 ] ;
switch ( cmd_id ) {
case SVGA_CMD_UPDATE :
* size = sizeof ( uint32_t ) + sizeof ( SVGAFifoCmdUpdate ) ;
break ;
case SVGA_CMD_DEFINE_GMRFB :
* size = sizeof ( uint32_t ) + sizeof ( SVGAFifoCmdDefineGMRFB ) ;
break ;
case SVGA_CMD_BLIT_GMRFB_TO_SCREEN :
* size = sizeof ( uint32_t ) + sizeof ( SVGAFifoCmdBlitGMRFBToScreen ) ;
break ;
case SVGA_CMD_BLIT_SCREEN_TO_GMRFB :
* size = sizeof ( uint32_t ) + sizeof ( SVGAFifoCmdBlitGMRFBToScreen ) ;
break ;
default :
VMW_DEBUG_USER ( " Unsupported SVGA command: %u. \n " , cmd_id ) ;
return - EINVAL ;
}
if ( * size > size_remaining ) {
VMW_DEBUG_USER ( " Invalid SVGA command (size mismatch): %u. \n " ,
cmd_id ) ;
return - EINVAL ;
}
if ( unlikely ( ! sw_context - > kernel ) ) {
VMW_DEBUG_USER ( " Kernel only SVGA command: %u. \n " , cmd_id ) ;
return - EPERM ;
}
if ( cmd_id = = SVGA_CMD_DEFINE_GMRFB )
return vmw_cmd_check_define_gmrfb ( dev_priv , sw_context , buf ) ;
return 0 ;
}
static const struct vmw_cmd_entry vmw_cmd_entries [ SVGA_3D_CMD_MAX ] = {
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_DEFINE , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_DESTROY , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_COPY , & vmw_cmd_surface_copy_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_STRETCHBLT , & vmw_cmd_stretch_blt_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_DMA , & vmw_cmd_dma ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_CONTEXT_DEFINE , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_CONTEXT_DESTROY , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETTRANSFORM , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETZRANGE , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETRENDERSTATE , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETRENDERTARGET ,
& vmw_cmd_set_render_target_check , true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETTEXTURESTATE , & vmw_cmd_tex_state ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETMATERIAL , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETLIGHTDATA , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETLIGHTENABLED , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETVIEWPORT , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETCLIPPLANE , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_CLEAR , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_PRESENT , & vmw_cmd_present_check ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SHADER_DEFINE , & vmw_cmd_shader_define ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SHADER_DESTROY , & vmw_cmd_shader_destroy ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SET_SHADER , & vmw_cmd_set_shader ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SET_SHADER_CONST , & vmw_cmd_set_shader_const ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DRAW_PRIMITIVES , & vmw_cmd_draw ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SETSCISSORRECT , & vmw_cmd_cid_check ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BEGIN_QUERY , & vmw_cmd_begin_query ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_END_QUERY , & vmw_cmd_end_query ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_WAIT_FOR_QUERY , & vmw_cmd_wait_query ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_PRESENT_READBACK , & vmw_cmd_ok ,
true , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN ,
& vmw_cmd_blt_surf_screen_check , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SURFACE_DEFINE_V2 , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_GENERATE_MIPMAPS , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_ACTIVATE_SURFACE , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEACTIVATE_SURFACE , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SCREEN_DMA , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD1 , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD2 , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD12 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD13 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD14 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD15 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD16 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEAD17 , & vmw_cmd_invalid , false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SET_OTABLE_BASE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_READBACK_OTABLE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_MOB , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DESTROY_GB_MOB , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_REDEFINE_GB_MOB64 , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_SURFACE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DESTROY_GB_SURFACE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BIND_GB_SURFACE , & vmw_cmd_bind_gb_surface ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_COND_BIND_GB_SURFACE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_UPDATE_GB_IMAGE , & vmw_cmd_update_gb_image ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_UPDATE_GB_SURFACE ,
& vmw_cmd_update_gb_surface , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_READBACK_GB_IMAGE ,
& vmw_cmd_readback_gb_image , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_READBACK_GB_SURFACE ,
& vmw_cmd_readback_gb_surface , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_INVALIDATE_GB_IMAGE ,
& vmw_cmd_invalidate_gb_image , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_INVALIDATE_GB_SURFACE ,
& vmw_cmd_invalidate_gb_surface , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DESTROY_GB_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BIND_GB_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_READBACK_GB_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_INVALIDATE_GB_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_SHADER , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BIND_GB_SHADER , & vmw_cmd_bind_gb_shader ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DESTROY_GB_SHADER , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SET_OTABLE_BASE64 , & vmw_cmd_invalid ,
false , false , false ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BEGIN_GB_QUERY , & vmw_cmd_begin_gb_query ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_END_GB_QUERY , & vmw_cmd_end_gb_query ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_WAIT_FOR_GB_QUERY , & vmw_cmd_wait_gb_query ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_NOP , & vmw_cmd_ok ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_NOP_ERROR , & vmw_cmd_ok ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_ENABLE_GART , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DISABLE_GART , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_MAP_MOB_INTO_GART , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_UNMAP_GART_RANGE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_SCREENTARGET , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DESTROY_GB_SCREENTARGET , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BIND_GB_SCREENTARGET , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_UPDATE_GB_SCREENTARGET , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE , & vmw_cmd_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_GB_SCREEN_DMA , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_GB_MOB_FENCE , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 , & vmw_cmd_invalid ,
false , false , true ) ,
/* SM commands */
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BIND_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_READBACK_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_INVALIDATE_CONTEXT , & vmw_cmd_invalid ,
false , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER ,
& vmw_cmd_dx_set_single_constant_buffer , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SHADER_RESOURCES ,
& vmw_cmd_dx_set_shader_res , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SHADER , & vmw_cmd_dx_set_shader ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SAMPLERS , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_INDEXED , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_INSTANCED , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_AUTO , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS ,
& vmw_cmd_dx_set_vertex_buffers , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_INDEX_BUFFER ,
& vmw_cmd_dx_set_index_buffer , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_RENDERTARGETS ,
& vmw_cmd_dx_set_rendertargets , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_BLEND_STATE , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_RASTERIZER_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_QUERY , & vmw_cmd_dx_define_query ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_QUERY , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BIND_QUERY , & vmw_cmd_dx_bind_query ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_QUERY_OFFSET ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BEGIN_QUERY , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_END_QUERY , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_READBACK_QUERY , & vmw_cmd_invalid ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_PREDICATION , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_VIEWPORTS , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SCISSORRECTS , & vmw_cmd_dx_cid_check ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW ,
& vmw_cmd_dx_clear_rendertarget_view , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW ,
& vmw_cmd_dx_clear_depthstencil_view , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_PRED_COPY , & vmw_cmd_invalid ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_GENMIPS , & vmw_cmd_dx_genmips ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE ,
& vmw_cmd_dx_check_subresource , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_READBACK_SUBRESOURCE ,
& vmw_cmd_dx_check_subresource , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE ,
& vmw_cmd_dx_check_subresource , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW ,
& vmw_cmd_dx_view_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW ,
& vmw_cmd_dx_view_remove , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW ,
& vmw_cmd_dx_view_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW ,
& vmw_cmd_dx_view_remove , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW ,
& vmw_cmd_dx_view_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW ,
& vmw_cmd_dx_view_remove , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_BLEND_STATE ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_BLEND_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_SHADER ,
& vmw_cmd_dx_define_shader , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_SHADER ,
& vmw_cmd_dx_destroy_shader , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BIND_SHADER ,
& vmw_cmd_dx_bind_shader , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT ,
& vmw_cmd_dx_so_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT ,
& vmw_cmd_dx_destroy_streamoutput , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_STREAMOUTPUT ,
& vmw_cmd_dx_set_streamoutput , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_SOTARGETS ,
& vmw_cmd_dx_set_so_targets , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_INPUT_LAYOUT ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_TOPOLOGY ,
& vmw_cmd_dx_cid_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BUFFER_COPY ,
& vmw_cmd_buffer_copy_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_PRED_COPY_REGION ,
& vmw_cmd_pred_copy_check , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER ,
& vmw_cmd_dx_transfer_from_buffer ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_INTRA_SURFACE_COPY , & vmw_cmd_intra_surface_copy ,
true , false , true ) ,
/*
* SM5 commands
*/
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_UA_VIEW , & vmw_cmd_sm5_view_define ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DESTROY_UA_VIEW , & vmw_cmd_sm5_view_remove ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT , & vmw_cmd_clear_uav_uint ,
true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT ,
& vmw_cmd_clear_uav_float , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT , & vmw_cmd_invalid , true ,
false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_UA_VIEWS , & vmw_cmd_set_uav , true , false ,
true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT ,
& vmw_cmd_indexed_instanced_indirect , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT ,
& vmw_cmd_instanced_indirect , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DISPATCH , & vmw_cmd_sm5 , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DISPATCH_INDIRECT ,
& vmw_cmd_dispatch_indirect , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_SET_CS_UA_VIEWS , & vmw_cmd_set_cs_uav , true ,
false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2 ,
& vmw_cmd_sm5_view_define , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB ,
& vmw_cmd_dx_define_streamoutput , true , false , true ) ,
VMW_CMD_DEF ( SVGA_3D_CMD_DX_BIND_STREAMOUTPUT ,
& vmw_cmd_dx_bind_streamoutput , true , false , true ) ,
} ;
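/**
 * vmw_cmd_describe - Look up a human-readable name and the size of a command
 *
 * @buf: Pointer to the command in the command stream.
 * @size: Set to the size of the command on return.
 * @cmd: Set to point at the command's name on return.
 *
 * Returns true if the command could be described, false otherwise.
 */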
bool vmw_cmd_describe ( const void * buf , u32 * size , char const * * cmd )
{
u32 cmd_id = ( ( u32 * ) buf ) [ 0 ] ;
if ( cmd_id > = SVGA_CMD_MAX ) {
SVGA3dCmdHeader * header = ( SVGA3dCmdHeader * ) buf ;
const struct vmw_cmd_entry * entry ;
* size = header - > size + sizeof ( SVGA3dCmdHeader ) ;
cmd_id = header - > id ;
if ( cmd_id > = SVGA_3D_CMD_MAX )
return false ;
cmd_id - = SVGA_3D_CMD_BASE ;
entry = & vmw_cmd_entries [ cmd_id ] ;
* cmd = entry - > cmd_name ;
return true ;
}
switch ( cmd_id ) {
case SVGA_CMD_UPDATE :
* cmd = " SVGA_CMD_UPDATE " ;
* size = sizeof ( u32 ) + sizeof ( SVGAFifoCmdUpdate ) ;
break ;
case SVGA_CMD_DEFINE_GMRFB :
* cmd = " SVGA_CMD_DEFINE_GMRFB " ;
* size = sizeof ( u32 ) + sizeof ( SVGAFifoCmdDefineGMRFB ) ;
break ;
case SVGA_CMD_BLIT_GMRFB_TO_SCREEN :
* cmd = " SVGA_CMD_BLIT_GMRFB_TO_SCREEN " ;
* size = sizeof ( u32 ) + sizeof ( SVGAFifoCmdBlitGMRFBToScreen ) ;
break ;
case SVGA_CMD_BLIT_SCREEN_TO_GMRFB :
* cmd = " SVGA_CMD_BLIT_SCREEN_TO_GMRFB " ;
* size = sizeof ( u32 ) + sizeof ( SVGAFifoCmdBlitGMRFBToScreen ) ;
break ;
default :
* cmd = " UNKNOWN " ;
* size = 0 ;
return false ;
}
return true ;
}
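/**
 * vmw_cmd_check - Validate a single command and dispatch it to its validator
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: remaining batch size. Out: size of the validated command.
 */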
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *) buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
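/**
 * vmw_cmd_check_all - Run vmw_cmd_check() on each command in the batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 */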
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
 * @sync_file: Only used to clean up in case of an error in this function.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd, struct sync_file *sync_file)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		if (sync_file)
			fput(sync_file->file);

		if (fence_rep.fd != -1) {
			put_unused_fd(fence_rep.fd);
			fence_rep.fd = -1;
		}

		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
					  TTM_REF_USAGE);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}
/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * @kernel_commands: If command buffers could not be used, the function will
 * return the value of @kernel_commands on function call. That value may be
 * NULL. In that case, the value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS casted to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
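/**
 * vmw_execbuf_tie_context - Look up and validate the DX context for a batch
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if none.
 */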
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	res = vmw_user_resource_noref_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
	if (unlikely(ret != 0))
		return ret;

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	return 0;
}
2011-10-04 20:13:17 +02:00
int vmw_execbuf_process ( struct drm_file * file_priv ,
struct vmw_private * dev_priv ,
2019-02-13 13:20:42 -08:00
void __user * user_commands , void * kernel_commands ,
uint32_t command_size , uint64_t throttle_us ,
2015-08-10 10:39:35 -07:00
uint32_t dx_context_handle ,
2012-02-09 16:56:43 +01:00
struct drm_vmw_fence_rep __user * user_fence_rep ,
2019-02-13 13:20:42 -08:00
struct vmw_fence_obj * * out_fence , uint32_t flags )
2009-12-10 00:19:58 +00:00
{
struct vmw_sw_context * sw_context = & dev_priv - > ctx ;
2012-02-09 16:56:43 +01:00
struct vmw_fence_obj * fence = NULL ;
2015-06-25 11:57:56 -07:00
struct vmw_cmdbuf_header * header ;
2019-03-11 20:24:46 -07:00
uint32_t handle = 0 ;
2011-10-04 20:13:17 +02:00
int ret ;
2017-07-05 01:49:32 -07:00
int32_t out_fence_fd = - 1 ;
struct sync_file * sync_file = NULL ;
2018-09-26 15:28:55 +02:00
DECLARE_VAL_CONTEXT ( val_ctx , & sw_context - > res_ht , 1 ) ;
2017-07-05 01:49:32 -07:00
2018-12-12 11:52:08 +01:00
vmw_validation_set_val_mem ( & val_ctx , & dev_priv - > vvm ) ;
2017-07-05 01:49:32 -07:00
if ( flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD ) {
out_fence_fd = get_unused_fd_flags ( O_CLOEXEC ) ;
if ( out_fence_fd < 0 ) {
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Failed to get a fence fd. \n " ) ;
2017-07-05 01:49:32 -07:00
return out_fence_fd ;
}
}
2009-12-10 00:19:58 +00:00
2015-08-10 10:45:11 -07:00
if ( throttle_us ) {
2020-11-10 22:14:46 -05:00
VMW_DEBUG_USER ( " Throttling is no longer supported. \n " ) ;
2015-06-25 11:57:56 -07:00
}
2015-08-10 10:45:11 -07:00
2015-06-25 11:57:56 -07:00
kernel_commands = vmw_execbuf_cmdbuf ( dev_priv , user_commands ,
kernel_commands , command_size ,
& header ) ;
2017-07-05 01:49:32 -07:00
if ( IS_ERR ( kernel_commands ) ) {
ret = PTR_ERR ( kernel_commands ) ;
goto out_free_fence_fd ;
}
2015-06-25 11:57:56 -07:00
2011-10-04 20:13:17 +02:00
ret = mutex_lock_interruptible ( & dev_priv - > cmdbuf_mutex ) ;
2015-06-25 11:57:56 -07:00
if ( ret ) {
ret = - ERESTARTSYS ;
goto out_free_header ;
}
2009-12-10 00:19:58 +00:00
2015-06-25 11:57:56 -07:00
sw_context - > kernel = false ;
2011-10-04 20:13:17 +02:00
if ( kernel_commands = = NULL ) {
ret = vmw_resize_cmd_bounce ( sw_context , command_size ) ;
if ( unlikely ( ret ! = 0 ) )
goto out_unlock ;
2009-12-10 00:19:58 +00:00
2019-02-13 13:20:42 -08:00
ret = copy_from_user ( sw_context - > cmd_bounce , user_commands ,
command_size ) ;
2011-10-04 20:13:17 +02:00
if ( unlikely ( ret ! = 0 ) ) {
ret = - EFAULT ;
2019-02-11 11:46:27 -08:00
VMW_DEBUG_USER ( " Failed copying commands. \n " ) ;
2011-10-04 20:13:17 +02:00
goto out_unlock ;
}
2019-02-13 13:20:42 -08:00
2011-10-04 20:13:17 +02:00
kernel_commands = sw_context - > cmd_bounce ;
2019-02-13 13:20:42 -08:00
} else if ( ! header ) {
2011-10-04 20:13:17 +02:00
sw_context - > kernel = true ;
2019-02-13 13:20:42 -08:00
}
2009-12-10 00:19:58 +00:00
2014-01-31 10:12:10 +01:00
sw_context - > fp = vmw_fpriv ( file_priv ) ;
2018-09-26 15:28:55 +02:00
INIT_LIST_HEAD ( & sw_context - > ctx_list ) ;
2011-10-04 20:13:30 +02:00
sw_context - > cur_query_bo = dev_priv - > pinned_bo ;
2012-11-20 12:19:35 +00:00
sw_context - > last_query_ctx = NULL ;
sw_context - > needs_post_query_barrier = false ;
2015-08-10 10:39:35 -07:00
sw_context - > dx_ctx_node = NULL ;
2015-08-10 10:56:15 -07:00
sw_context - > dx_query_mob = NULL ;
sw_context - > dx_query_ctx = NULL ;
2012-11-20 12:19:35 +00:00
memset ( sw_context - > res_cache , 0 , sizeof ( sw_context - > res_cache ) ) ;
INIT_LIST_HEAD ( & sw_context - > res_relocations ) ;
2018-09-26 15:36:52 +02:00
INIT_LIST_HEAD ( & sw_context - > bo_relocations ) ;
2019-02-13 13:20:42 -08:00
2015-08-10 10:39:35 -07:00
if ( sw_context - > staged_bindings )
vmw_binding_state_reset ( sw_context - > staged_bindings ) ;
2012-11-20 12:19:35 +00:00
if ( ! sw_context - > res_ht_initialized ) {
ret = drm_ht_create ( & sw_context - > res_ht , VMW_RES_HT_ORDER ) ;
if ( unlikely ( ret ! = 0 ) )
goto out_unlock ;
2019-02-13 13:20:42 -08:00
2012-11-20 12:19:35 +00:00
sw_context - > res_ht_initialized = true ;
}
2019-02-13 13:20:42 -08:00
2014-06-09 12:39:22 +02:00
INIT_LIST_HEAD ( & sw_context - > staged_cmd_res ) ;
2018-09-26 15:28:55 +02:00
sw_context - > ctx = & val_ctx ;
2015-08-10 10:39:35 -07:00
ret = vmw_execbuf_tie_context ( dev_priv , sw_context , dx_context_handle ) ;
2018-09-26 15:28:55 +02:00
if ( unlikely ( ret ! = 0 ) )
2015-08-10 10:39:35 -07:00
goto out_err_nores ;
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;
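
	/*
	 * Reserve the resources gathered during verification, then reserve and
	 * validate their backing buffer objects, and finally validate the
	 * resources themselves.
	 */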
	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}
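
	/*
	 * With guest-backed objects, context bindings may have been scrubbed
	 * (e.g. across eviction); rebind them before submitting commands that
	 * rely on them.
	 */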
	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}
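
	/*
	 * Submit the now-patched command stream, either directly to the FIFO
	 * or through the command buffer manager if a header was set up above.
	 */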
	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;
			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle, out_fence_fd,
				    sync_file);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
				    false);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;
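
	/*
	 * If a query is still active, emit a dummy query-wait that acts as a
	 * query barrier so all queries touching the pinned buffer have
	 * finished before it is unpinned.
	 */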
	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}
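
	/*
	 * Unpin both query buffers and fence them so their memory is not
	 * reused before the barrier above has executed; create a local fence
	 * if the caller did not pass one in.
	 */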
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}

	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended and zeropadded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM will have copied it correctly. */
		break;
	}

	/* If a fence fd was imported from elsewhere, wait on it first. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);
		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}
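
	/*
	 * Argument checking is done; hand the command stream over to the
	 * generic execbuf processing path.
	 */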
	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);

	return ret;
}