/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
# ifndef __AMDGPU_VCE_H__
# define __AMDGPU_VCE_H__
2015-05-06 14:31:27 -04:00
int amdgpu_vce_sw_init ( struct amdgpu_device * adev , unsigned long size ) ;
2015-04-20 16:55:21 -04:00
int amdgpu_vce_sw_fini ( struct amdgpu_device * adev ) ;
int amdgpu_vce_suspend ( struct amdgpu_device * adev ) ;
int amdgpu_vce_resume ( struct amdgpu_device * adev ) ;
int amdgpu_vce_get_create_msg ( struct amdgpu_ring * ring , uint32_t handle ,
2016-10-25 13:00:45 +01:00
struct dma_fence * * fence ) ;
2015-04-20 16:55:21 -04:00
int amdgpu_vce_get_destroy_msg ( struct amdgpu_ring * ring , uint32_t handle ,
2016-10-25 13:00:45 +01:00
bool direct , struct dma_fence * * fence ) ;
2015-04-20 16:55:21 -04:00
void amdgpu_vce_free_handles ( struct amdgpu_device * adev , struct drm_file * filp ) ;
int amdgpu_vce_ring_parse_cs ( struct amdgpu_cs_parser * p , uint32_t ib_idx ) ;
2016-10-10 15:23:32 +02:00
int amdgpu_vce_ring_parse_cs_vm ( struct amdgpu_cs_parser * p , uint32_t ib_idx ) ;
2016-05-06 17:50:03 +02:00
void amdgpu_vce_ring_emit_ib ( struct amdgpu_ring * ring , struct amdgpu_ib * ib ,
unsigned vm_id , bool ctx_switch ) ;
2015-04-20 16:55:21 -04:00
void amdgpu_vce_ring_emit_fence ( struct amdgpu_ring * ring , u64 addr , u64 seq ,
2015-06-01 14:35:03 +08:00
unsigned flags ) ;
2015-04-20 16:55:21 -04:00
int amdgpu_vce_ring_test_ring ( struct amdgpu_ring * ring ) ;
2016-07-05 21:07:17 +02:00
int amdgpu_vce_ring_test_ib ( struct amdgpu_ring * ring , long timeout ) ;
2016-07-20 16:53:36 +02:00
void amdgpu_vce_ring_begin_use ( struct amdgpu_ring * ring ) ;
void amdgpu_vce_ring_end_use ( struct amdgpu_ring * ring ) ;
2016-09-16 11:01:26 -04:00
unsigned amdgpu_vce_ring_get_emit_ib_size ( struct amdgpu_ring * ring ) ;
unsigned amdgpu_vce_ring_get_dma_frame_size ( struct amdgpu_ring * ring ) ;
2015-04-20 16:55:21 -04:00
# endif