/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
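
/*
 * Order (log2 of the number of buckets) of the hash table used to map
 * resources and buffer objects to validation entries during command
 * stream checking.
 */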
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}
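
/*
 * vmw_cmd_invalid - Verifier for commands that user-space may not submit.
 * Note that the return value is non-zero also for CAP_SYS_ADMIN callers,
 * so the command is rejected either way.
 */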
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
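
/* vmw_cmd_ok - Verifier for commands that need no further checking. */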
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, set on successful return to the resource's
 * validation node.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}
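
/*
 * vmw_cmd_set_render_target_check - Validate an SVGA_3D_CMD_SETRENDERTARGET
 * command: check the embedded context id, then put the render target
 * surface on the validation list.
 */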
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, NULL);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission "
			  "exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission "
			  "exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));

		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return 0;
}
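
/*
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command: translate the
 * guest pointer, put the host surface on the validation list and let the
 * kms code snoop cursor updates from the DMA buffer.
 */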
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
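
/*
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command:
 * bounds-check the vertex declaration and primitive range arrays and
 * validate every surface they reference.
 */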
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
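
/*
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command:
 * check the context id and validate each surface bound through an
 * SVGA3D_TS_BIND_TEXTURE state entry.
 */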
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
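
/*
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 * by translating the guest pointer embedded in its body.
 */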
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					 user_shader_converter,
					 &cmd->body.shid, NULL);

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_bind_gb_shader_consts - Validate an SVGA_3D_CMD_BIND_SHADER_CONSTS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader_consts(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_sc_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShaderConsts body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_bind_gb_sc_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}
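
/*
 * vmw_cmd_check_not_3d - Verify a legacy (non-3D) SVGA command and compute
 * its size. These commands may only be submitted by kernel-internal
 * callers.
 */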
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
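
/*
 * Per-command verifier dispatch table, indexed by the 3D command id
 * relative to SVGA_3D_CMD_BASE.
 */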
typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_SHADERCONSTS,
		    &vmw_cmd_bind_gb_shader_consts),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check)
};
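
/*
 * vmw_cmd_check - Verify a single command in the stream: legacy commands
 * go to vmw_cmd_check_not_3d(), 3D commands to the verifier table. *size
 * is updated with the command's actual size.
 */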
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
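
/*
 * Layout sketch (an assumption drawn from the size arithmetic in
 * vmw_cmd_check()): each 3D packet walked above is an SVGA3dCmdHeader
 * followed by header.size bytes of payload.
 */
#if 0
struct example_3d_packet {
	SVGA3dCmdHeader header;	/* header.size = payload size in bytes */
	uint8_t body[];		/* payload; total packet size is
				 * sizeof(header) + header.size */
};
#endif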

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */
	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */
	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
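
/*
 * Illustrative sketch (not part of the driver): the growth policy above
 * in isolation. The bounce buffer grows by roughly 1.5x per step, rounded
 * up to a page; e.g. assuming a 32 KiB initial size, a 100 KiB request
 * grows 32K -> 48K -> 72K -> 108K.
 */
#if 0
static unsigned long example_bounce_grow(unsigned long cur,
					 unsigned long need)
{
	while (cur < need)
		cur = PAGE_ALIGN(cur + (cur >> 1));
	return cur;
}
#endif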

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * userspace handle is created for the fence; otherwise no handle is
 * created.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
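
/*
 * Illustrative caller sketch (the helper is hypothetical, the calls are
 * real): the NULL-fence contract above means a caller never has to
 * branch on the return value; a NULL *p_fence is safe to use for
 * fencing.
 */
#if 0
static void example_fence_after_submit(struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence = NULL;

	/* No user-space handle requested, so file_priv may be NULL. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);

	/*
	 * On failure the fifo was synced and fence is NULL; both cases
	 * are safe to hand to the fencing code.
	 */
	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);
}
#endif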

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is left untouched;
 * if user-space has preloaded it with -EFAULT, the copy failure can then
 * be detected.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
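
/*
 * Illustrative user-space sketch (an assumption, shown only to document
 * the -EFAULT preload convention above; it does not belong in this
 * file). The UAPI names match vmwgfx_drm.h; the surrounding code is
 * hypothetical.
 */
#if 0
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.version = DRM_VMW_EXECBUF_VERSION;
	/* arg.commands / arg.command_size set up elsewhere. */

	/* Preload so a failed copy_to_user() in the kernel is visible. */
	fence_rep.error = -EFAULT;
	arg.fence_rep = (unsigned long) &fence_rep;

	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
	    fence_rep.error == 0) {
		/* fence_rep.handle now names a valid fence object. */
	}
#endif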

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_err:
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}
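
/*
 * Usage note (grounded in vmw_execbuf_process() above): when the query
 * cid has already been switched away from, the function is called with
 * a fence issued after the implicit query barrier:
 */
#if 0
	if (dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
#endif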

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
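
/*
 * Illustrative caller sketch (a hypothetical context-teardown path):
 * using the wrapper above as a query barrier before destroying a
 * hardware context, as described in its documentation.
 */
#if 0
	/* Make sure no queries still touch the context being destroyed. */
	vmw_execbuf_release_pinned_bo(dev_priv);
	/* ...now it is safe to emit the context-destroy command. */
#endif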

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}