/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_CTX_H__
#define __AMDGPU_CTX_H__

#include "amdgpu_ring.h"

struct drm_device;
struct drm_file;
struct amdgpu_fpriv;

#define AMDGPU_MAX_ENTITY_NUM 4
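
/*
 * Per-ring scheduler entity state of a context. The field notes below are a
 * best-effort sketch of the semantics implemented in amdgpu_ctx.c:
 * @sequence is the sequence number handed to the next submission on this
 * entity, @entity is the drm_sched entity that jobs are pushed to, and
 * @fences is a ring buffer of the most recently submitted fences, indexed
 * by sequence number (flexible array, sized when the entity is allocated).
 */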
struct amdgpu_ctx_entity {
	uint64_t		sequence;
	struct drm_sched_entity	entity;
	struct dma_fence	*fences[];
};
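
/*
 * A userspace GPU context. Field notes (sketch; amdgpu_ctx.c is
 * authoritative): @refcount is taken by amdgpu_ctx_get() and dropped by
 * amdgpu_ctx_put(); @reset_counter, @reset_counter_query and
 * @vram_lost_counter snapshot device-global counters so queries can tell
 * userspace about GPU resets and VRAM loss; @ring_lock protects the
 * per-entity fence ring buffers; @entities holds the lazily created
 * scheduler entities, indexed by HW IP type and ring; @override_priority,
 * when set, wins over @init_priority; @guilty is set when a hang is blamed
 * on a job from this context; @ras_counter_ce/@ras_counter_ue cache the RAS
 * correctable/uncorrectable error counts for delta reporting.
 */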
struct amdgpu_ctx {
	struct kref			refcount;
	struct amdgpu_device		*adev;
	unsigned			reset_counter;
	unsigned			reset_counter_query;
	uint32_t			vram_lost_counter;
	spinlock_t			ring_lock;
	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
	bool				preamble_presented;
	enum drm_sched_priority		init_priority;
	enum drm_sched_priority		override_priority;
	struct mutex			lock;
	atomic_t			guilty;
	unsigned long			ras_counter_ce;
	unsigned long			ras_counter_ue;
};
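
/*
 * Per-file-descriptor context manager: @ctx_handles maps the uint32_t
 * handles userspace passes in the CTX ioctl to amdgpu_ctx objects.
 */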
struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};
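
/* Number of scheduler entities each HW IP type exposes per context. */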
extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
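
/*
 * Handle lookup and refcounting: amdgpu_ctx_get() resolves a userspace
 * handle to a context and takes a reference, amdgpu_ctx_put() drops it.
 * Minimal caller sketch ("handle" is a placeholder, error handling elided):
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, handle);
 *	if (!ctx)
 *		return -EINVAL;
 *	...
 *	amdgpu_ctx_put(ctx);
 */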
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
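
/*
 * Translate the (hw_ip, instance, ring) triple used by the CS interface
 * into the matching scheduler entity, creating it on first use; returns 0
 * on success or a negative errno.
 */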
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity);
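
/*
 * Remember @fence as the latest submission on @entity and return the
 * sequence number assigned to it through @seq, so it can be found again
 * later with amdgpu_ctx_get_fence().
 */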
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *seq);
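
/*
 * Look up a fence stored by amdgpu_ctx_add_fence() by sequence number.
 * Expected contract (sketch): returns a fence reference, NULL once the
 * fence has aged out of the per-entity ring buffer, or an ERR_PTR for an
 * invalid sequence number.
 */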
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq);
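
/*
 * Apply @priority to all scheduler entities of @ctx, overriding the
 * priority the context was created with.
 */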
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);
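
/* Handler for the DRM_AMDGPU_CTX ioctl (allocate, free and query contexts). */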
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);
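
/*
 * Throttle a context by waiting on an older fence of @entity before more
 * work is queued, so submissions cannot run arbitrarily far ahead.
 */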
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity);
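
/*
 * Manager lifecycle (sketch of the teardown order): _init() sets up the
 * idr; on file close _entity_flush() gives queued jobs up to @timeout to
 * drain and _entity_fini() then destroys the entities; _fini() releases
 * the remaining contexts and the idr.
 */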
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

#endif