2020-03-06 07:28:19 +03:00
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */
2020-09-17 20:39:20 +03:00
# include <linux/refcount.h>
2020-03-06 07:28:19 +03:00
# include <linux/mutex.h>
# include <linux/clk.h>
# include <linux/device.h>
# include <linux/interconnect.h>
# include "ipa.h"
# include "ipa_clock.h"
# include "ipa_modem.h"
2020-11-20 01:40:39 +03:00
# include "ipa_data.h"
2020-03-06 07:28:19 +03:00
/**
 * DOC: IPA Clocking
 *
 * The "IPA Clock" manages both the IPA core clock and the interconnects
 * (buses) the IPA depends on as a single logical entity.  A reference count
 * is incremented by "get" operations and decremented by "put" operations.
 * Transitions of that count from 0 to 1 result in the clock and interconnects
 * being enabled, and transitions of the count from 1 to 0 cause them to be
 * disabled.  We currently operate the core clock at a fixed clock rate, and
 * all buses at a fixed average and peak bandwidth.  As more advanced IPA
 * features are enabled, we can make better use of clock and bus scaling.
 *
 * An IPA clock reference must be held for any access to IPA hardware.
 */
2021-01-15 15:50:46 +03:00
/**
* struct ipa_interconnect - IPA interconnect information
* @ path : Interconnect path
2021-01-15 15:50:47 +03:00
* @ average_bandwidth : Average interconnect bandwidth ( KB / second )
* @ peak_bandwidth : Peak interconnect bandwidth ( KB / second )
2021-01-15 15:50:46 +03:00
*/
struct ipa_interconnect {
struct icc_path * path ;
2021-01-15 15:50:47 +03:00
u32 average_bandwidth ;
u32 peak_bandwidth ;
2021-01-15 15:50:46 +03:00
} ;
2020-03-06 07:28:19 +03:00
/**
* struct ipa_clock - IPA clocking information
* @ count : Clocking reference count
2020-07-13 15:24:18 +03:00
* @ mutex : Protects clock enable / disable
2020-03-06 07:28:19 +03:00
* @ core : IPA core clock
2021-01-15 15:50:50 +03:00
* @ interconnect_count : Number of elements in interconnect [ ]
2021-01-15 15:50:46 +03:00
* @ interconnect : Interconnect array
2020-03-06 07:28:19 +03:00
*/
struct ipa_clock {
2020-09-17 20:39:20 +03:00
refcount_t count ;
2020-03-06 07:28:19 +03:00
struct mutex mutex ; /* protects clock enable/disable */
struct clk * core ;
2021-01-15 15:50:50 +03:00
u32 interconnect_count ;
struct ipa_interconnect * interconnect ;
2020-03-06 07:28:19 +03:00
} ;
2021-01-15 15:50:49 +03:00
static int ipa_interconnect_init_one ( struct device * dev ,
struct ipa_interconnect * interconnect ,
const struct ipa_interconnect_data * data )
2020-03-06 07:28:19 +03:00
{
struct icc_path * path ;
2021-01-15 15:50:49 +03:00
path = of_icc_get ( dev , data - > name ) ;
if ( IS_ERR ( path ) ) {
int ret = PTR_ERR ( path ) ;
2020-03-06 07:28:19 +03:00
2021-01-15 15:50:49 +03:00
dev_err ( dev , " error %d getting %s interconnect \n " , ret ,
data - > name ) ;
return ret ;
}
interconnect - > path = path ;
interconnect - > average_bandwidth = data - > average_bandwidth ;
interconnect - > peak_bandwidth = data - > peak_bandwidth ;
return 0 ;
}
static void ipa_interconnect_exit_one ( struct ipa_interconnect * interconnect )
{
icc_put ( interconnect - > path ) ;
memset ( interconnect , 0 , sizeof ( * interconnect ) ) ;
2020-03-06 07:28:19 +03:00
}
/* Initialize interconnects required for IPA operation */
2021-01-15 15:50:47 +03:00
static int ipa_interconnect_init ( struct ipa_clock * clock , struct device * dev ,
const struct ipa_interconnect_data * data )
2020-03-06 07:28:19 +03:00
{
2021-01-15 15:50:47 +03:00
struct ipa_interconnect * interconnect ;
2021-01-15 15:50:50 +03:00
u32 count ;
2021-01-15 15:50:49 +03:00
int ret ;
2020-03-06 07:28:19 +03:00
2021-01-15 15:50:50 +03:00
count = clock - > interconnect_count ;
interconnect = kcalloc ( count , sizeof ( * interconnect ) , GFP_KERNEL ) ;
if ( ! interconnect )
return - ENOMEM ;
clock - > interconnect = interconnect ;
while ( count - - ) {
ret = ipa_interconnect_init_one ( dev , interconnect , data + + ) ;
if ( ret )
goto out_unwind ;
interconnect + + ;
}
2020-03-06 07:28:19 +03:00
return 0 ;
2021-01-15 15:50:50 +03:00
out_unwind :
while ( interconnect - - > clock - > interconnect )
ipa_interconnect_exit_one ( interconnect ) ;
kfree ( clock - > interconnect ) ;
clock - > interconnect = NULL ;
2021-01-15 15:50:49 +03:00
return ret ;
2020-03-06 07:28:19 +03:00
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit ( struct ipa_clock * clock )
{
2021-01-15 15:50:49 +03:00
struct ipa_interconnect * interconnect ;
2021-01-15 15:50:50 +03:00
interconnect = clock - > interconnect + clock - > interconnect_count ;
while ( interconnect - - > clock - > interconnect )
ipa_interconnect_exit_one ( interconnect ) ;
kfree ( clock - > interconnect ) ;
clock - > interconnect = NULL ;
2020-03-06 07:28:19 +03:00
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable ( struct ipa * ipa )
{
2021-01-15 15:50:47 +03:00
struct ipa_interconnect * interconnect ;
2020-03-06 07:28:19 +03:00
struct ipa_clock * clock = ipa - > clock ;
int ret ;
2021-01-15 15:50:50 +03:00
u32 i ;
interconnect = clock - > interconnect ;
for ( i = 0 ; i < clock - > interconnect_count ; i + + ) {
ret = icc_set_bw ( interconnect - > path ,
interconnect - > average_bandwidth ,
interconnect - > peak_bandwidth ) ;
if ( ret )
goto out_unwind ;
interconnect + + ;
}
2020-03-06 07:28:19 +03:00
return 0 ;
2021-01-15 15:50:50 +03:00
out_unwind :
while ( interconnect - - > clock - > interconnect )
( void ) icc_set_bw ( interconnect - > path , 0 , 0 ) ;
2020-03-06 07:28:19 +03:00
return ret ;
}
/* To disable an interconnect, we just its bandwidth to 0 */
2021-01-15 15:50:45 +03:00
static void ipa_interconnect_disable ( struct ipa * ipa )
2020-03-06 07:28:19 +03:00
{
2021-01-15 15:50:47 +03:00
struct ipa_interconnect * interconnect ;
2020-03-06 07:28:19 +03:00
struct ipa_clock * clock = ipa - > clock ;
2021-01-15 15:50:45 +03:00
int result = 0 ;
2021-01-15 15:50:50 +03:00
u32 count ;
2020-03-06 07:28:19 +03:00
int ret ;
2021-01-15 15:50:50 +03:00
count = clock - > interconnect_count ;
interconnect = clock - > interconnect + count ;
while ( count - - ) {
interconnect - - ;
ret = icc_set_bw ( interconnect - > path , 0 , 0 ) ;
if ( ret & & ! result )
result = ret ;
}
2020-03-06 07:28:19 +03:00
2021-01-15 15:50:45 +03:00
if ( result )
dev_err ( & ipa - > pdev - > dev ,
" error %d disabling IPA interconnects \n " , ret ) ;
2020-03-06 07:28:19 +03:00
}
/* Turn on IPA clocks, including interconnects */
static int ipa_clock_enable ( struct ipa * ipa )
{
int ret ;
ret = ipa_interconnect_enable ( ipa ) ;
if ( ret )
return ret ;
ret = clk_prepare_enable ( ipa - > clock - > core ) ;
if ( ret )
ipa_interconnect_disable ( ipa ) ;
return ret ;
}
/* Inverse of ipa_clock_enable() */
static void ipa_clock_disable ( struct ipa * ipa )
{
clk_disable_unprepare ( ipa - > clock - > core ) ;
2021-01-15 15:50:45 +03:00
ipa_interconnect_disable ( ipa ) ;
2020-03-06 07:28:19 +03:00
}
/* Get an IPA clock reference, but only if the reference count is
* already non - zero . Returns true if the additional reference was
* added successfully , or false otherwise .
*/
bool ipa_clock_get_additional ( struct ipa * ipa )
{
2020-09-17 20:39:20 +03:00
return refcount_inc_not_zero ( & ipa - > clock - > count ) ;
2020-03-06 07:28:19 +03:00
}
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate . Otherwise it is checked again
2020-09-17 20:39:22 +03:00
* under protection of the mutex , and if appropriate the IPA clock
* is enabled .
2020-03-06 07:28:19 +03:00
*
* Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed .
*/
void ipa_clock_get ( struct ipa * ipa )
{
struct ipa_clock * clock = ipa - > clock ;
int ret ;
/* If the clock is running, just bump the reference count */
if ( ipa_clock_get_additional ( ipa ) )
return ;
/* Otherwise get the mutex and check again */
mutex_lock ( & clock - > mutex ) ;
/* A reference might have been added before we got the mutex. */
if ( ipa_clock_get_additional ( ipa ) )
goto out_mutex_unlock ;
ret = ipa_clock_enable ( ipa ) ;
if ( ret ) {
dev_err ( & ipa - > pdev - > dev , " error %d enabling IPA clock \n " , ret ) ;
goto out_mutex_unlock ;
}
2020-09-17 20:39:20 +03:00
refcount_set ( & clock - > count , 1 ) ;
2020-03-06 07:28:19 +03:00
out_mutex_unlock :
mutex_unlock ( & clock - > mutex ) ;
}
2020-09-17 20:39:22 +03:00
/* Attempt to remove an IPA clock reference. If this represents the
* last reference , disable the IPA clock under protection of the mutex .
2020-03-06 07:28:19 +03:00
*/
void ipa_clock_put ( struct ipa * ipa )
{
struct ipa_clock * clock = ipa - > clock ;
/* If this is not the last reference there's nothing more to do */
2020-09-17 20:39:20 +03:00
if ( ! refcount_dec_and_mutex_lock ( & clock - > count , & clock - > mutex ) )
2020-03-06 07:28:19 +03:00
return ;
ipa_clock_disable ( ipa ) ;
mutex_unlock ( & clock - > mutex ) ;
}
2020-07-04 00:23:34 +03:00
/* Return the current IPA core clock rate */
u32 ipa_clock_rate ( struct ipa * ipa )
{
return ipa - > clock ? ( u32 ) clk_get_rate ( ipa - > clock - > core ) : 0 ;
}
2020-03-06 07:28:19 +03:00
/* Initialize IPA clocking */
2020-11-20 01:40:39 +03:00
struct ipa_clock *
ipa_clock_init ( struct device * dev , const struct ipa_clock_data * data )
2020-03-06 07:28:19 +03:00
{
struct ipa_clock * clock ;
struct clk * clk ;
int ret ;
clk = clk_get ( dev , " core " ) ;
if ( IS_ERR ( clk ) ) {
dev_err ( dev , " error %ld getting core clock \n " , PTR_ERR ( clk ) ) ;
return ERR_CAST ( clk ) ;
}
2020-11-20 01:40:41 +03:00
ret = clk_set_rate ( clk , data - > core_clock_rate ) ;
2020-03-06 07:28:19 +03:00
if ( ret ) {
2020-11-20 01:40:41 +03:00
dev_err ( dev , " error %d setting core clock rate to %u \n " ,
ret , data - > core_clock_rate ) ;
2020-03-06 07:28:19 +03:00
goto err_clk_put ;
}
clock = kzalloc ( sizeof ( * clock ) , GFP_KERNEL ) ;
if ( ! clock ) {
ret = - ENOMEM ;
goto err_clk_put ;
}
clock - > core = clk ;
2021-01-15 15:50:50 +03:00
clock - > interconnect_count = data - > interconnect_count ;
2020-03-06 07:28:19 +03:00
2021-01-15 15:50:50 +03:00
ret = ipa_interconnect_init ( clock , dev , data - > interconnect_data ) ;
2020-03-06 07:28:19 +03:00
if ( ret )
goto err_kfree ;
mutex_init ( & clock - > mutex ) ;
2020-09-17 20:39:20 +03:00
refcount_set ( & clock - > count , 0 ) ;
2020-03-06 07:28:19 +03:00
return clock ;
err_kfree :
kfree ( clock ) ;
err_clk_put :
clk_put ( clk ) ;
return ERR_PTR ( ret ) ;
}
/* Inverse of ipa_clock_init() */
void ipa_clock_exit ( struct ipa_clock * clock )
{
struct clk * clk = clock - > core ;
2020-09-17 20:39:20 +03:00
WARN_ON ( refcount_read ( & clock - > count ) ! = 0 ) ;
2020-03-06 07:28:19 +03:00
mutex_destroy ( & clock - > mutex ) ;
ipa_interconnect_exit ( clock ) ;
kfree ( clock ) ;
clk_put ( clk ) ;
}