/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
struct msm_gem_submit ;
2014-05-30 14:49:43 -04:00
struct msm_gpu_perfcntr ;
2013-07-19 12:59:32 -04:00
2017-05-08 14:35:03 -06:00
struct msm_gpu_config {
const char * ioname ;
const char * irqname ;
uint64_t va_start ;
uint64_t va_end ;
unsigned int ringsz ;
} ;
/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *  + z180_gpu
 */
struct msm_gpu_funcs {
int ( * get_param ) ( struct msm_gpu * gpu , uint32_t param , uint64_t * value ) ;
int ( * hw_init ) ( struct msm_gpu * gpu ) ;
int ( * pm_suspend ) ( struct msm_gpu * gpu ) ;
int ( * pm_resume ) ( struct msm_gpu * gpu ) ;
2016-05-03 09:46:49 -04:00
void ( * submit ) ( struct msm_gpu * gpu , struct msm_gem_submit * submit ,
2013-07-19 12:59:32 -04:00
struct msm_file_private * ctx ) ;
void ( * flush ) ( struct msm_gpu * gpu ) ;
irqreturn_t ( * irq ) ( struct msm_gpu * irq ) ;
uint32_t ( * last_fence ) ( struct msm_gpu * gpu ) ;
2013-08-24 14:20:38 -04:00
void ( * recover ) ( struct msm_gpu * gpu ) ;
2013-07-19 12:59:32 -04:00
void ( * destroy ) ( struct msm_gpu * gpu ) ;
# ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void ( * show ) ( struct msm_gpu * gpu , struct seq_file * m ) ;
# endif
} ;
struct msm_gpu {
const char * name ;
struct drm_device * dev ;
2017-02-10 15:36:33 -05:00
struct platform_device * pdev ;
2013-07-19 12:59:32 -04:00
const struct msm_gpu_funcs * funcs ;
2014-05-30 14:49:43 -04:00
/* performance counters (hw & sw): */
spinlock_t perf_lock ;
bool perfcntr_active ;
struct {
bool active ;
ktime_t time ;
} last_sample ;
uint32_t totaltime , activetime ; /* sw counters */
uint32_t last_cntrs [ 5 ] ; /* hw counters */
const struct msm_gpu_perfcntr * perfcntrs ;
uint32_t num_perfcntrs ;
2016-03-15 17:22:13 -04:00
/* ringbuffer: */
2013-07-19 12:59:32 -04:00
struct msm_ringbuffer * rb ;
2016-11-11 12:06:46 -05:00
uint64_t rb_iova ;
2013-07-19 12:59:32 -04:00
/* list of GEM active objects: */
struct list_head active_list ;
2016-03-15 17:22:13 -04:00
/* fencing: */
struct msm_fence_context * fctx ;
2013-08-24 14:20:38 -04:00
2017-02-10 15:36:33 -05:00
/* does gpu need hw_init? */
bool needs_hw_init ;
2014-01-11 16:25:08 -05:00
2013-07-19 12:59:32 -04:00
/* worker for handling active-list retiring: */
struct work_struct retire_work ;
void __iomem * mmio ;
int irq ;
2016-09-28 19:58:32 -04:00
struct msm_gem_address_space * aspace ;
2013-07-19 12:59:32 -04:00
/* Power Control: */
struct regulator * gpu_reg , * gpu_cx ;
2017-03-07 10:02:56 -07:00
struct clk * * grp_clks ;
int nr_clocks ;
struct clk * ebi1_clk , * core_clk , * rbbmtimer_clk ;
2017-03-07 10:02:54 -07:00
uint32_t fast_rate , bus_freq ;
2013-11-15 09:03:15 -05:00
2015-06-04 10:26:37 -04:00
# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
2013-11-15 09:03:15 -05:00
struct msm_bus_scale_pdata * bus_scale_table ;
2013-07-19 12:59:32 -04:00
uint32_t bsc ;
2013-11-15 09:03:15 -05:00
# endif
2013-08-24 14:20:38 -04:00
2014-01-11 16:25:08 -05:00
/* Hang and Inactivity Detection:
*/
# define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
2017-02-10 15:36:33 -05:00
2013-08-24 14:20:38 -04:00
# define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
# define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer ;
uint32_t hangcheck_fence ;
struct work_struct recover_work ;
2015-06-07 13:46:04 -04:00
struct list_head submit_list ;
2013-07-19 12:59:32 -04:00
} ;
2014-01-11 16:25:08 -05:00
static inline bool msm_gpu_active ( struct msm_gpu * gpu )
{
2016-03-15 17:22:13 -04:00
return gpu - > fctx - > last_fence > gpu - > funcs - > last_fence ( gpu ) ;
2014-01-11 16:25:08 -05:00
}
/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */
struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
2013-07-19 12:59:32 -04:00
static inline void gpu_write ( struct msm_gpu * gpu , u32 reg , u32 data )
{
msm_writel ( data , gpu - > mmio + ( reg < < 2 ) ) ;
}
/* Read a 32-bit GPU register (reg is a dword offset). */
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	void __iomem *addr = gpu->mmio + (reg << 2);

	return msm_readl(addr);
}
2016-11-28 12:28:28 -07:00
static inline void gpu_rmw ( struct msm_gpu * gpu , u32 reg , u32 mask , u32 or )
{
uint32_t val = gpu_read ( gpu , reg ) ;
val & = ~ mask ;
gpu_write ( gpu , reg , val | or ) ;
}
static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first
	 * to trigger that.
	 */
	u64 lo_val = msm_readl(gpu->mmio + (lo << 2));
	u64 hi_val = msm_readl(gpu->mmio + (hi << 2));

	return (hi_val << 32) | lo_val;
}
static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	void __iomem *lo_addr = gpu->mmio + (lo << 2);
	void __iomem *hi_addr = gpu->mmio + (hi << 2);

	msm_writel(lower_32_bits(val), lo_addr);
	msm_writel(upper_32_bits(val), hi_addr);
}
2013-07-19 12:59:32 -04:00
int msm_gpu_pm_suspend ( struct msm_gpu * gpu ) ;
int msm_gpu_pm_resume ( struct msm_gpu * gpu ) ;
2017-02-10 15:36:33 -05:00
int msm_gpu_hw_init ( struct msm_gpu * gpu ) ;
2014-05-30 14:49:43 -04:00
void msm_gpu_perfcntr_start ( struct msm_gpu * gpu ) ;
void msm_gpu_perfcntr_stop ( struct msm_gpu * gpu ) ;
int msm_gpu_perfcntr_sample ( struct msm_gpu * gpu , uint32_t * activetime ,
uint32_t * totaltime , uint32_t ncntrs , uint32_t * cntrs ) ;
2013-07-19 12:59:32 -04:00
void msm_gpu_retire ( struct msm_gpu * gpu ) ;
2016-06-16 16:37:38 -04:00
void msm_gpu_submit ( struct msm_gpu * gpu , struct msm_gem_submit * submit ,
2013-07-19 12:59:32 -04:00
struct msm_file_private * ctx ) ;
int msm_gpu_init ( struct drm_device * drm , struct platform_device * pdev ,
struct msm_gpu * gpu , const struct msm_gpu_funcs * funcs ,
2017-05-08 14:35:03 -06:00
const char * name , struct msm_gpu_config * config ) ;
2013-07-19 12:59:32 -04:00
void msm_gpu_cleanup ( struct msm_gpu * gpu ) ;
2014-09-05 13:30:27 -04:00
struct msm_gpu * adreno_load_gpu ( struct drm_device * dev ) ;
2014-09-05 13:06:37 -04:00
void __init adreno_register ( void ) ;
void __exit adreno_unregister ( void ) ;
#endif /* __MSM_GPU_H__ */