/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
# include "msm_gpu.h"
# include "msm_gem.h"
2013-11-16 12:56:06 -05:00
# include "msm_mmu.h"
2016-03-15 15:35:08 -04:00
# include "msm_fence.h"
2013-07-19 12:59:32 -04:00
/*
* Power Management :
*/
2015-06-04 10:26:37 -04:00
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
/* Register a bus-scale client if the platform provided a scale table. */
static void bs_init(struct msm_gpu *gpu)
{
	if (!gpu->bus_scale_table)
		return;

	gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
	DBG("bus scale client: %08x", gpu->bsc);
}

/* Unregister the bus-scale client, if one was registered. */
static void bs_fini(struct msm_gpu *gpu)
{
	if (!gpu->bsc)
		return;

	msm_bus_scale_unregister_client(gpu->bsc);
	gpu->bsc = 0;
}

/* Request bus bandwidth level 'idx' (0 == idle). */
static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (!gpu->bsc)
		return;

	DBG("set bus scaling: %d", idx);
	msm_bus_scale_client_update_request(gpu->bsc, idx);
}
#else
/* No-op stubs when downstream bus-scaling support is not built in: */
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif
static int enable_pwrrail ( struct msm_gpu * gpu )
{
struct drm_device * dev = gpu - > dev ;
int ret = 0 ;
if ( gpu - > gpu_reg ) {
ret = regulator_enable ( gpu - > gpu_reg ) ;
if ( ret ) {
dev_err ( dev - > dev , " failed to enable 'gpu_reg': %d \n " , ret ) ;
return ret ;
}
}
if ( gpu - > gpu_cx ) {
ret = regulator_enable ( gpu - > gpu_cx ) ;
if ( ret ) {
dev_err ( dev - > dev , " failed to enable 'gpu_cx': %d \n " , ret ) ;
return ret ;
}
}
return 0 ;
}
static int disable_pwrrail ( struct msm_gpu * gpu )
{
if ( gpu - > gpu_cx )
regulator_disable ( gpu - > gpu_cx ) ;
if ( gpu - > gpu_reg )
regulator_disable ( gpu - > gpu_reg ) ;
return 0 ;
}
static int enable_clk ( struct msm_gpu * gpu )
{
int i ;
2017-03-07 10:02:56 -07:00
if ( gpu - > core_clk & & gpu - > fast_rate )
clk_set_rate ( gpu - > core_clk , gpu - > fast_rate ) ;
2013-07-19 12:59:32 -04:00
2016-11-28 12:28:33 -07:00
/* Set the RBBM timer rate to 19.2Mhz */
2017-03-07 10:02:56 -07:00
if ( gpu - > rbbmtimer_clk )
clk_set_rate ( gpu - > rbbmtimer_clk , 19200000 ) ;
2016-11-28 12:28:33 -07:00
2017-03-07 10:02:56 -07:00
for ( i = gpu - > nr_clocks - 1 ; i > = 0 ; i - - )
2016-11-28 12:28:31 -07:00
if ( gpu - > grp_clks [ i ] )
clk_prepare ( gpu - > grp_clks [ i ] ) ;
2013-07-19 12:59:32 -04:00
2017-03-07 10:02:56 -07:00
for ( i = gpu - > nr_clocks - 1 ; i > = 0 ; i - - )
2013-07-19 12:59:32 -04:00
if ( gpu - > grp_clks [ i ] )
clk_enable ( gpu - > grp_clks [ i ] ) ;
return 0 ;
}
static int disable_clk ( struct msm_gpu * gpu )
{
int i ;
2017-03-07 10:02:56 -07:00
for ( i = gpu - > nr_clocks - 1 ; i > = 0 ; i - - )
2016-11-28 12:28:31 -07:00
if ( gpu - > grp_clks [ i ] )
2013-07-19 12:59:32 -04:00
clk_disable ( gpu - > grp_clks [ i ] ) ;
2017-03-07 10:02:56 -07:00
for ( i = gpu - > nr_clocks - 1 ; i > = 0 ; i - - )
2013-07-19 12:59:32 -04:00
if ( gpu - > grp_clks [ i ] )
clk_unprepare ( gpu - > grp_clks [ i ] ) ;
2017-03-07 10:02:54 -07:00
/*
* Set the clock to a deliberately low rate . On older targets the clock
* speed had to be non zero to avoid problems . On newer targets this
* will be rounded down to zero anyway so it all works out .
*/
2017-03-07 10:02:56 -07:00
if ( gpu - > core_clk )
clk_set_rate ( gpu - > core_clk , 27000000 ) ;
2016-11-28 12:28:31 -07:00
2017-03-07 10:02:56 -07:00
if ( gpu - > rbbmtimer_clk )
clk_set_rate ( gpu - > rbbmtimer_clk , 0 ) ;
2016-11-28 12:28:33 -07:00
2013-07-19 12:59:32 -04:00
return 0 ;
}
static int enable_axi ( struct msm_gpu * gpu )
{
if ( gpu - > ebi1_clk )
clk_prepare_enable ( gpu - > ebi1_clk ) ;
if ( gpu - > bus_freq )
bs_set ( gpu , gpu - > bus_freq ) ;
return 0 ;
}
static int disable_axi ( struct msm_gpu * gpu )
{
if ( gpu - > ebi1_clk )
clk_disable_unprepare ( gpu - > ebi1_clk ) ;
if ( gpu - > bus_freq )
bs_set ( gpu , 0 ) ;
return 0 ;
}
int msm_gpu_pm_resume ( struct msm_gpu * gpu )
{
int ret ;
2017-02-10 15:36:33 -05:00
DBG ( " %s " , gpu - > name ) ;
2013-07-19 12:59:32 -04:00
ret = enable_pwrrail ( gpu ) ;
if ( ret )
return ret ;
ret = enable_clk ( gpu ) ;
if ( ret )
return ret ;
ret = enable_axi ( gpu ) ;
if ( ret )
return ret ;
2017-02-10 15:36:33 -05:00
gpu - > needs_hw_init = true ;
2013-07-19 12:59:32 -04:00
return 0 ;
}
int msm_gpu_pm_suspend ( struct msm_gpu * gpu )
{
int ret ;
2017-02-10 15:36:33 -05:00
DBG ( " %s " , gpu - > name ) ;
2013-07-19 12:59:32 -04:00
ret = disable_axi ( gpu ) ;
if ( ret )
return ret ;
ret = disable_clk ( gpu ) ;
if ( ret )
return ret ;
ret = disable_pwrrail ( gpu ) ;
if ( ret )
return ret ;
return 0 ;
}
2017-02-10 15:36:33 -05:00
int msm_gpu_hw_init ( struct msm_gpu * gpu )
2014-01-11 16:25:08 -05:00
{
2017-02-10 15:36:33 -05:00
int ret ;
2014-01-11 16:25:08 -05:00
2017-06-13 09:15:36 -04:00
WARN_ON ( ! mutex_is_locked ( & gpu - > dev - > struct_mutex ) ) ;
2017-02-10 15:36:33 -05:00
if ( ! gpu - > needs_hw_init )
return 0 ;
2014-01-11 16:25:08 -05:00
2017-02-10 15:36:33 -05:00
disable_irq ( gpu - > irq ) ;
ret = gpu - > funcs - > hw_init ( gpu ) ;
if ( ! ret )
gpu - > needs_hw_init = false ;
enable_irq ( gpu - > irq ) ;
2014-01-11 16:25:08 -05:00
2017-02-10 15:36:33 -05:00
return ret ;
2014-01-11 16:25:08 -05:00
}
2013-08-24 14:20:38 -04:00
/*
* Hangcheck detection for locked gpu :
*/
2016-03-15 18:26:28 -04:00
static void retire_submits ( struct msm_gpu * gpu ) ;
2015-06-07 13:46:04 -04:00
2013-08-24 14:20:38 -04:00
/*
 * GPU hang recovery, run from the workqueue after hangcheck_handler()
 * decided the GPU is stuck.  Signals the hung fence, identifies the
 * offending submit/task for the log, resets the GPU, and replays the
 * submits that were still queued behind the hung one.
 */
static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	/* the hung submit's fence is last_fence + 1; force-signal it so
	 * waiters aren't stuck forever:
	 */
	msm_update_fence(gpu->fctx, fence + 1);

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	/* find the submit that hung (seqno == fence + 1) and log its task: */
	list_for_each_entry(submit, &gpu->submit_list, node) {
		if (submit->fence->seqno == (fence + 1)) {
			struct task_struct *task;

			/* pid_task() requires RCU read-side protection */
			rcu_read_lock();
			task = pid_task(submit->pid, PIDTYPE_PID);
			if (task) {
				dev_err(dev->dev, "%s: offending task: %s\n",
						gpu->name, task->comm);
			}
			rcu_read_unlock();
			break;
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		/* keep the device powered across the reset */
		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/* replay the remaining submits after the one that hung: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	/* kick retirement for anything that completed during recovery */
	msm_gpu_retire(gpu);
}
static void hangcheck_timer_reset ( struct msm_gpu * gpu )
{
DBG ( " %s " , gpu - > name ) ;
mod_timer ( & gpu - > hangcheck_timer ,
round_jiffies_up ( jiffies + DRM_MSM_HANGCHECK_JIFFIES ) ) ;
}
static void hangcheck_handler ( unsigned long data )
{
struct msm_gpu * gpu = ( struct msm_gpu * ) data ;
2013-09-11 17:14:30 -04:00
struct drm_device * dev = gpu - > dev ;
struct msm_drm_private * priv = dev - > dev_private ;
2013-08-24 14:20:38 -04:00
uint32_t fence = gpu - > funcs - > last_fence ( gpu ) ;
if ( fence ! = gpu - > hangcheck_fence ) {
/* some progress has been made.. ya! */
gpu - > hangcheck_fence = fence ;
2016-03-15 17:22:13 -04:00
} else if ( fence < gpu - > fctx - > last_fence ) {
2013-08-24 14:20:38 -04:00
/* no progress and not done.. hung! */
gpu - > hangcheck_fence = fence ;
2013-09-03 07:12:03 -04:00
dev_err ( dev - > dev , " %s: hangcheck detected gpu lockup! \n " ,
gpu - > name ) ;
dev_err ( dev - > dev , " %s: completed fence: %u \n " ,
gpu - > name , fence ) ;
dev_err ( dev - > dev , " %s: submitted fence: %u \n " ,
2016-03-15 17:22:13 -04:00
gpu - > name , gpu - > fctx - > last_fence ) ;
2013-08-24 14:20:38 -04:00
queue_work ( priv - > wq , & gpu - > recover_work ) ;
}
/* if still more pending work, reset the hangcheck timer: */
2016-03-15 17:22:13 -04:00
if ( gpu - > fctx - > last_fence > gpu - > hangcheck_fence )
2013-08-24 14:20:38 -04:00
hangcheck_timer_reset ( gpu ) ;
2013-09-11 17:14:30 -04:00
/* workaround for missing irq: */
queue_work ( priv - > wq , & gpu - > retire_work ) ;
2013-08-24 14:20:38 -04:00
}
2014-05-30 14:49:43 -04:00
/*
* Performance Counters :
*/
/* called under perf_lock */
/*
 * Sample the hardware performance counter registers, write the deltas
 * since the previous sample into cntrs[] (up to ncntrs entries), and
 * remember the raw values for the next delta.  Returns the number of
 * counters written.  Called under perf_lock.
 */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	for (i = 0; i < gpu->num_perfcntrs; i++) {
		uint32_t val = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

		/* report the delta for as many counters as the caller asked for */
		if (i < n)
			cntrs[i] = val - gpu->last_cntrs[i];

		gpu->last_cntrs[i] = val;
	}

	return n;
}
static void update_sw_cntrs ( struct msm_gpu * gpu )
{
ktime_t time ;
uint32_t elapsed ;
unsigned long flags ;
spin_lock_irqsave ( & gpu - > perf_lock , flags ) ;
if ( ! gpu - > perfcntr_active )
goto out ;
time = ktime_get ( ) ;
elapsed = ktime_to_us ( ktime_sub ( time , gpu - > last_sample . time ) ) ;
gpu - > totaltime + = elapsed ;
if ( gpu - > last_sample . active )
gpu - > activetime + = elapsed ;
gpu - > last_sample . active = msm_gpu_active ( gpu ) ;
gpu - > last_sample . time = time ;
out :
spin_unlock_irqrestore ( & gpu - > perf_lock , flags ) ;
}
void msm_gpu_perfcntr_start ( struct msm_gpu * gpu )
{
unsigned long flags ;
2017-02-10 15:36:33 -05:00
pm_runtime_get_sync ( & gpu - > pdev - > dev ) ;
2014-05-30 14:49:43 -04:00
spin_lock_irqsave ( & gpu - > perf_lock , flags ) ;
/* we could dynamically enable/disable perfcntr registers too.. */
gpu - > last_sample . active = msm_gpu_active ( gpu ) ;
gpu - > last_sample . time = ktime_get ( ) ;
gpu - > activetime = gpu - > totaltime = 0 ;
gpu - > perfcntr_active = true ;
update_hw_cntrs ( gpu , 0 , NULL ) ;
spin_unlock_irqrestore ( & gpu - > perf_lock , flags ) ;
}
void msm_gpu_perfcntr_stop ( struct msm_gpu * gpu )
{
gpu - > perfcntr_active = false ;
2017-02-10 15:36:33 -05:00
pm_runtime_put_sync ( & gpu - > pdev - > dev ) ;
2014-05-30 14:49:43 -04:00
}
/* returns -errno or # of cntrs sampled */
/*
 * Read and reset the accumulated active/total times and sample the
 * hardware counters into cntrs[].  Returns the number of counters
 * sampled, or -EINVAL if profiling is not active.
 */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (gpu->perfcntr_active) {
		*activetime = gpu->activetime;
		*totaltime = gpu->totaltime;

		/* restart accumulation for the next sampling window */
		gpu->activetime = gpu->totaltime = 0;

		ret = update_hw_cntrs(gpu, ncntrs, cntrs);
	}

	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}
2013-07-19 12:59:32 -04:00
/*
* Cmdstream submission / retirement :
*/
2016-03-16 16:07:38 -04:00
/*
 * Retire one completed submit: drop the per-bo iova pin and reference
 * taken at submit time, release the runtime-PM reference, and free the
 * submit object.  Caller holds dev->struct_mutex (via retire_submits()).
 */
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		/* drop the iova pin and bo reference taken in msm_gpu_submit(): */
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	/* balance the pm_runtime_get_sync() from msm_gpu_submit() */
	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);

	msm_gem_submit_free(submit);
}
2016-03-15 18:26:28 -04:00
static void retire_submits ( struct msm_gpu * gpu )
2015-06-07 13:46:04 -04:00
{
struct drm_device * dev = gpu - > dev ;
WARN_ON ( ! mutex_is_locked ( & dev - > struct_mutex ) ) ;
while ( ! list_empty ( & gpu - > submit_list ) ) {
struct msm_gem_submit * submit ;
submit = list_first_entry ( & gpu - > submit_list ,
struct msm_gem_submit , node ) ;
2016-10-25 13:00:45 +01:00
if ( dma_fence_is_signaled ( submit - > fence ) ) {
2016-03-16 16:07:38 -04:00
retire_submit ( gpu , submit ) ;
2015-06-07 13:46:04 -04:00
} else {
break ;
}
}
}
2013-07-19 12:59:32 -04:00
static void retire_worker ( struct work_struct * work )
{
struct msm_gpu * gpu = container_of ( work , struct msm_gpu , retire_work ) ;
struct drm_device * dev = gpu - > dev ;
uint32_t fence = gpu - > funcs - > last_fence ( gpu ) ;
2016-03-15 17:22:13 -04:00
msm_update_fence ( gpu - > fctx , fence ) ;
2013-09-14 14:01:55 -04:00
2013-07-19 12:59:32 -04:00
mutex_lock ( & dev - > struct_mutex ) ;
2016-03-15 18:26:28 -04:00
retire_submits ( gpu ) ;
2013-07-19 12:59:32 -04:00
mutex_unlock ( & dev - > struct_mutex ) ;
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire ( struct msm_gpu * gpu )
{
struct msm_drm_private * priv = gpu - > dev - > dev_private ;
queue_work ( priv - > wq , & gpu - > retire_work ) ;
2014-05-30 14:49:43 -04:00
update_sw_cntrs ( gpu ) ;
2013-07-19 12:59:32 -04:00
}
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* hold a runtime-PM ref until retire_submit(); lazily re-init the
	 * hw if it was powered down since the last submit:
	 */
	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	/* queue for retirement/replay before touching the hardware: */
	list_add_tail(&submit->node, &gpu->submit_list);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		/* track the bo against the submit fence so CPU access waits: */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}
/*
* Init / Cleanup :
*/
static irqreturn_t irq_handler ( int irq , void * data )
{
struct msm_gpu * gpu = data ;
return gpu - > funcs - > irq ( gpu ) ;
}
2017-03-07 10:02:56 -07:00
/* Look up a named clock; a failed lookup is treated as "not present". */
static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return NULL;

	return clk;
}
static int get_clocks ( struct platform_device * pdev , struct msm_gpu * gpu )
{
struct device * dev = & pdev - > dev ;
struct property * prop ;
const char * name ;
int i = 0 ;
gpu - > nr_clocks = of_property_count_strings ( dev - > of_node , " clock-names " ) ;
if ( gpu - > nr_clocks < 1 ) {
gpu - > nr_clocks = 0 ;
return 0 ;
}
gpu - > grp_clks = devm_kcalloc ( dev , sizeof ( struct clk * ) , gpu - > nr_clocks ,
GFP_KERNEL ) ;
if ( ! gpu - > grp_clks )
return - ENOMEM ;
of_property_for_each_string ( dev - > of_node , " clock-names " , prop , name ) {
gpu - > grp_clks [ i ] = get_clock ( dev , name ) ;
/* Remember the key clocks that we need to control later */
2017-05-03 10:43:14 -04:00
if ( ! strcmp ( name , " core " ) | | ! strcmp ( name , " core_clk " ) )
2017-03-07 10:02:56 -07:00
gpu - > core_clk = gpu - > grp_clks [ i ] ;
2017-05-03 10:43:14 -04:00
else if ( ! strcmp ( name , " rbbmtimer " ) | | ! strcmp ( name , " rbbmtimer_clk " ) )
2017-03-07 10:02:56 -07:00
gpu - > rbbmtimer_clk = gpu - > grp_clks [ i ] ;
+ + i ;
}
return 0 ;
}
2013-07-19 12:59:32 -04:00
/*
 * One-time GPU device setup: fence context, work items, hangcheck timer,
 * MMIO mapping, IRQ, clocks, regulators, IOMMU address space, and
 * ringbuffer.  Returns 0 or a negative errno.  Most resources are
 * devm-managed; the rest are released in msm_gpu_cleanup().
 */
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct iommu_domain *iommu;
	int ret;

	/* clamp to the sampling buffer size so update_hw_cntrs() can't overrun */
	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->fctx = msm_fence_context_alloc(drm, name);
	if (IS_ERR(gpu->fctx)) {
		ret = PTR_ERR(gpu->fctx);
		gpu->fctx = NULL;
		goto fail;
	}

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	/* the bus clock and regulators are all optional: */
	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (iommu) {
		iommu->geometry.aperture_start = config->va_start;
		iommu->geometry.aperture_end = config->va_end;

		dev_info(drm->dev, "%s: using IOMMU\n", name);
		gpu->aspace = msm_gem_address_space_create(&pdev->dev,
				iommu, "gpu");
		if (IS_ERR(gpu->aspace)) {
			ret = PTR_ERR(gpu->aspace);
			dev_err(drm->dev, "failed to init iommu: %d\n", ret);
			gpu->aspace = NULL;
			/* aspace creation failed, so we own the domain: */
			iommu_domain_free(iommu);
			goto fail;
		}
	} else {
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	}

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	return 0;

fail:
	return ret;
}
/*
 * Tear down what msm_gpu_init() set up (devm-managed resources are
 * released automatically): bus-scale client, ringbuffer (dropping its
 * iova pin first if mapped), and the fence context.
 */
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	/* all submits should have been retired by now */
	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->fctx)
		msm_fence_context_free(gpu->fctx);
}