/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	uint64_t va_start;
	uint64_t va_end;
	unsigned int nr_rings;
};
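
/*
 * Illustrative only (the values below are hypothetical, not taken from
 * any real backend): a GPU backend would fill this in and hand it to
 * msm_gpu_init(), e.g.:
 *
 *	struct msm_gpu_config config = {
 *		.ioname = "kgsl_3d0_reg_memory",
 *		.va_start = SZ_16M,
 *		.va_end = 0xffffffff,
 *		.nr_rings = 1,
 *	};
 */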

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
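
/*
 * A minimal sketch (editorial, not part of this header) of the usual
 * kernel embedding pattern that hierarchy implies: each child embeds
 * its parent as the first member so container_of() can recover the
 * subclass from a struct msm_gpu pointer.  Field and helper names here
 * are hypothetical:
 *
 *	struct a3xx_gpu {
 *		struct adreno_gpu base;	// which itself embeds msm_gpu
 *		// a3xx-specific state
 *	};
 *	#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
 */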
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
			struct msm_file_private *ctx);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
};
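
/*
 * Illustrative only (callback names are hypothetical stand-ins for a
 * real generation's implementations): a backend typically provides a
 * const vtable and passes it to msm_gpu_init():
 *
 *	static const struct msm_gpu_funcs axxx_funcs = {
 *		.get_param = axxx_get_param,
 *		.hw_init = axxx_hw_init,
 *		.pm_suspend = msm_gpu_pm_suspend,
 *		.pm_resume = msm_gpu_pm_resume,
 *		.submit = axxx_submit,
 *		.flush = axxx_flush,
 *		.irq = axxx_irq,
 *		.active_ring = axxx_active_ring,
 *		.recover = axxx_recover,
 *		.destroy = axxx_destroy,
 *	};
 */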

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;	/* sw counters */
	uint32_t last_cntrs[5];		/* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/* list of GEM active objects: */
	struct list_head active_list;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	struct icc_path *icc_path;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD  66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	struct work_struct recover_work;

	struct drm_gem_object *memptrs_bo;

	struct {
		struct devfreq *devfreq;
		u64 busy_cycles;
		ktime_t time;
	} devfreq;

	struct msm_gpu_state *crashstate;
};

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
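
/*
 * Worked example (editorial annotation): ilog2(SZ_32K / 8) =
 * ilog2(4096) = 12 and ilog2(32 / 8) = ilog2(4) = 2, so the default
 * packs BUFSZ = 12 and BLKSZ = 2 into CP_RB_CNTL.  The /8 suggests
 * both fields encode log2 of a size in 8-byte units, though that
 * interpretation is ours, not stated here.
 */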

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (ring->seqno > ring->memptrs->fence)
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the
 * child class that actually enables the perf counter, but the msm_gpu
 * base class handles sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
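
/*
 * Illustrative only (register/selector names hypothetical, not taken
 * from any real generation headers): a child class might describe its
 * counters with a table like
 *
 *	static const struct msm_gpu_perfcntr perfcntrs[] = {
 *		{ REG_AXXX_SP_PERFCOUNTER_SELECT,
 *		  REG_AXXX_RBBM_PERFCTR_SP_LO,
 *		  SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
 *	};
 *
 * and point gpu->perfcntrs / gpu->num_perfcntrs at it, leaving the
 * sampling and display to this base class.
 */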

struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 prio;
	int faults;
	struct list_head node;
	struct kref ref;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	uint32_t val = gpu_read(gpu, reg);

	val &= ~mask;
	gpu_write(gpu, reg, val | or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here?  Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins.  The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better?  Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first to
	 * trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here?  Read the screed above. */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
void msm_gpu_resume_devfreq(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->dev->struct_mutex);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->dev->struct_mutex);
}

#endif /* __MSM_GPU_H__ */