/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

struct etnaviv_chip_identity {
	/* Chip model. */
	u32 model;

	/* Revision value. */
	u32 revision;

	/* Supported feature fields. */
	u32 features;

	/* Supported minor feature fields. */
	u32 minor_features0;
	u32 minor_features1;
	u32 minor_features2;
	u32 minor_features3;
	u32 minor_features4;
	u32 minor_features5;
	u32 minor_features6;
	u32 minor_features7;
	u32 minor_features8;
	u32 minor_features9;
	u32 minor_features10;
	u32 minor_features11;

	/* Number of streams supported. */
	u32 stream_count;

	/* Total number of temporary registers per thread. */
	u32 register_max;

	/* Maximum number of threads. */
	u32 thread_count;

	/* Number of shader cores. */
	u32 shader_core_count;

	/* Size of the vertex cache. */
	u32 vertex_cache_size;

	/* Number of entries in the vertex output buffer. */
	u32 vertex_output_buffer_size;

	/* Number of pixel pipes. */
	u32 pixel_pipes;

	/* Number of instructions. */
	u32 instruction_count;

	/* Number of constants. */
	u32 num_constants;

	/* Buffer size. */
	u32 buffer_size;

	/* Number of varyings. */
	u8 varyings_count;
};
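
/*
 * Illustrative use of the identity data (a sketch, not part of this
 * header): the feature words are bitmasks, so code paths are normally
 * gated on a mask from the generated Vivante feature headers, e.g.:
 *
 *	if (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)
 *		... core has the 2.0 memory controller ...
 */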

/* Security handling mode of the GPU core. */
enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,	/* no security handling */
	ETNA_SEC_KERNEL,	/* kernel driver manages the security states */
	ETNA_SEC_TZ		/* TrustZone firmware manages the security states */
};

struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;

struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

struct etnaviv_gpu {
	struct drm_device *drm;
	struct thermal_cooling_device *cooling;
	struct device *dev;
	struct mutex lock;
	struct etnaviv_chip_identity identity;
	enum etnaviv_sec_mode sec_mode;
	struct workqueue_struct *wq;
	struct drm_gpu_scheduler sched;

	/* 'ring'-buffer: */
	struct etnaviv_cmdbuf buffer;
	int exec_state;

	/* bus base address of memory */
	u32 memory_base;

	/* event management: */
	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
	struct etnaviv_event event[ETNA_NR_EVENTS];
	struct completion event_free;
	spinlock_t event_spinlock;

	u32 idle_mask;

	/* Fencing support */
	struct mutex fence_lock;
	struct idr fence_idr;
	u32 next_fence;
	u32 completed_fence;
	wait_queue_head_t fence_event;
	u64 fence_context;
	spinlock_t fence_spinlock;

	/* worker for handling 'sync' points: */
	struct work_struct sync_point_work;
	int sync_point_event;

	/* hang detection */
	u32 hangcheck_dma_addr;

	void __iomem *mmio;
	int irq;

	struct etnaviv_iommu *mmu;
	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;

	/* Power Control: */
	struct clk *clk_bus;
	struct clk *clk_reg;
	struct clk *clk_core;
	struct clk *clk_shader;

	unsigned int freq_scale;
	unsigned long base_rate_core;
	unsigned long base_rate_shader;
};
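
/*
 * Event slots tie an in-flight submit to its completion interrupt: a
 * free slot is claimed from event_bitmap under event_spinlock, armed
 * with a fence, and released again from the IRQ handler. A minimal
 * sketch of the claim side (illustrative only; the real logic,
 * including waiting on event_free, lives in etnaviv_gpu.c):
 *
 *	spin_lock_irqsave(&gpu->event_spinlock, flags);
 *	event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
 *	if (event < ETNA_NR_EVENTS)
 *		set_bit(event, gpu->event_bitmap);
 *	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 */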

static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}
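
/*
 * Example of a read-modify-write through the accessors above (a
 * sketch for illustration; the VIVS_* names come from the generated
 * register headers):
 *
 *	u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 *
 *	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL,
 *		  clock | VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
 */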

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
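
/*
 * Fence sequence numbers are 32-bit and can wrap, so completion
 * checks should use wrap-safe signed arithmetic instead of a plain
 * compare. A minimal sketch of such a check (the helper name is
 * illustrative):
 *
 *	static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
 *	{
 *		return (s32)(gpu->completed_fence - fence) >= 0;
 *	}
 */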

struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
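
/*
 * The PM helpers above are expected to be balanced: each successful
 * etnaviv_gpu_pm_get_sync() powers the core up (or keeps it up) and
 * must be paired with an etnaviv_gpu_pm_put(). A sketch of the usual
 * calling pattern:
 *
 *	ret = etnaviv_gpu_pm_get_sync(gpu);
 *	if (ret < 0)
 *		return ret;
 *	... touch GPU registers while powered ...
 *	etnaviv_gpu_pm_put(gpu);
 */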

int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */