// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
/**
 * DOC: IPA Clocking
 *
 * The "IPA Clock" manages both the IPA core clock and the interconnects
 * (buses) the IPA depends on as a single logical entity.  A reference count
 * is incremented by "get" operations and decremented by "put" operations.
 * Transitions of that count from 0 to 1 result in the clock and interconnects
 * being enabled, and transitions of the count from 1 to 0 cause them to be
 * disabled.  We currently operate the core clock at a fixed clock rate, and
 * all buses at a fixed average and peak bandwidth.  As more advanced IPA
 * features are enabled, we can make better use of clock and bus scaling.
 *
 * An IPA clock reference must be held for any access to IPA hardware.
 */
2021-01-15 06:50:46 -06:00
/**
* struct ipa_interconnect - IPA interconnect information
* @ path : Interconnect path
2021-01-15 06:50:47 -06:00
* @ average_bandwidth : Average interconnect bandwidth ( KB / second )
* @ peak_bandwidth : Peak interconnect bandwidth ( KB / second )
2021-01-15 06:50:46 -06:00
*/
struct ipa_interconnect {
struct icc_path * path ;
2021-01-15 06:50:47 -06:00
u32 average_bandwidth ;
u32 peak_bandwidth ;
2021-01-15 06:50:46 -06:00
} ;
2021-08-04 10:36:26 -05:00
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
2020-03-05 22:28:19 -06:00
/**
* struct ipa_clock - IPA clocking information
2021-08-10 14:27:01 -05:00
* @ dev : IPA device pointer
2020-03-05 22:28:19 -06:00
* @ core : IPA core clock
2021-08-04 10:36:26 -05:00
* @ flags : Boolean state flags
2021-01-15 06:50:50 -06:00
* @ interconnect_count : Number of elements in interconnect [ ]
2021-01-15 06:50:46 -06:00
* @ interconnect : Interconnect array
2020-03-05 22:28:19 -06:00
*/
struct ipa_clock {
2021-08-10 14:27:01 -05:00
struct device * dev ;
2020-03-05 22:28:19 -06:00
struct clk * core ;
2021-08-04 10:36:26 -05:00
DECLARE_BITMAP ( flags , IPA_POWER_FLAG_COUNT ) ;
2021-01-15 06:50:50 -06:00
u32 interconnect_count ;
struct ipa_interconnect * interconnect ;
2020-03-05 22:28:19 -06:00
} ;
2021-01-15 06:50:49 -06:00
static int ipa_interconnect_init_one ( struct device * dev ,
struct ipa_interconnect * interconnect ,
const struct ipa_interconnect_data * data )
2020-03-05 22:28:19 -06:00
{
struct icc_path * path ;
2021-01-15 06:50:49 -06:00
path = of_icc_get ( dev , data - > name ) ;
if ( IS_ERR ( path ) ) {
int ret = PTR_ERR ( path ) ;
2020-03-05 22:28:19 -06:00
2021-02-12 08:33:59 -06:00
dev_err_probe ( dev , ret , " error getting %s interconnect \n " ,
data - > name ) ;
2021-01-15 06:50:49 -06:00
return ret ;
}
interconnect - > path = path ;
interconnect - > average_bandwidth = data - > average_bandwidth ;
interconnect - > peak_bandwidth = data - > peak_bandwidth ;
return 0 ;
}
static void ipa_interconnect_exit_one ( struct ipa_interconnect * interconnect )
{
icc_put ( interconnect - > path ) ;
memset ( interconnect , 0 , sizeof ( * interconnect ) ) ;
2020-03-05 22:28:19 -06:00
}
/* Initialize interconnects required for IPA operation */
2021-01-15 06:50:47 -06:00
static int ipa_interconnect_init ( struct ipa_clock * clock , struct device * dev ,
const struct ipa_interconnect_data * data )
2020-03-05 22:28:19 -06:00
{
2021-01-15 06:50:47 -06:00
struct ipa_interconnect * interconnect ;
2021-01-15 06:50:50 -06:00
u32 count ;
2021-01-15 06:50:49 -06:00
int ret ;
2020-03-05 22:28:19 -06:00
2021-01-15 06:50:50 -06:00
count = clock - > interconnect_count ;
interconnect = kcalloc ( count , sizeof ( * interconnect ) , GFP_KERNEL ) ;
if ( ! interconnect )
return - ENOMEM ;
clock - > interconnect = interconnect ;
while ( count - - ) {
ret = ipa_interconnect_init_one ( dev , interconnect , data + + ) ;
if ( ret )
goto out_unwind ;
interconnect + + ;
}
2020-03-05 22:28:19 -06:00
return 0 ;
2021-01-15 06:50:50 -06:00
out_unwind :
while ( interconnect - - > clock - > interconnect )
ipa_interconnect_exit_one ( interconnect ) ;
kfree ( clock - > interconnect ) ;
clock - > interconnect = NULL ;
2021-01-15 06:50:49 -06:00
return ret ;
2020-03-05 22:28:19 -06:00
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit ( struct ipa_clock * clock )
{
2021-01-15 06:50:49 -06:00
struct ipa_interconnect * interconnect ;
2021-01-15 06:50:50 -06:00
interconnect = clock - > interconnect + clock - > interconnect_count ;
while ( interconnect - - > clock - > interconnect )
ipa_interconnect_exit_one ( interconnect ) ;
kfree ( clock - > interconnect ) ;
clock - > interconnect = NULL ;
2020-03-05 22:28:19 -06:00
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable ( struct ipa * ipa )
{
2021-01-15 06:50:47 -06:00
struct ipa_interconnect * interconnect ;
2020-03-05 22:28:19 -06:00
struct ipa_clock * clock = ipa - > clock ;
int ret ;
2021-01-15 06:50:50 -06:00
u32 i ;
interconnect = clock - > interconnect ;
for ( i = 0 ; i < clock - > interconnect_count ; i + + ) {
ret = icc_set_bw ( interconnect - > path ,
interconnect - > average_bandwidth ,
interconnect - > peak_bandwidth ) ;
2021-08-04 10:36:23 -05:00
if ( ret ) {
dev_err ( & ipa - > pdev - > dev ,
" error %d enabling %s interconnect \n " ,
ret , icc_get_name ( interconnect - > path ) ) ;
2021-01-15 06:50:50 -06:00
goto out_unwind ;
2021-08-04 10:36:23 -05:00
}
2021-01-15 06:50:50 -06:00
interconnect + + ;
}
2020-03-05 22:28:19 -06:00
return 0 ;
2021-01-15 06:50:50 -06:00
out_unwind :
while ( interconnect - - > clock - > interconnect )
( void ) icc_set_bw ( interconnect - > path , 0 , 0 ) ;
2020-03-05 22:28:19 -06:00
return ret ;
}
/* To disable an interconnect, we just its bandwidth to 0 */
2021-08-04 10:36:23 -05:00
static int ipa_interconnect_disable ( struct ipa * ipa )
2020-03-05 22:28:19 -06:00
{
2021-01-15 06:50:47 -06:00
struct ipa_interconnect * interconnect ;
2020-03-05 22:28:19 -06:00
struct ipa_clock * clock = ipa - > clock ;
2021-08-04 10:36:23 -05:00
struct device * dev = & ipa - > pdev - > dev ;
2021-01-15 06:50:45 -06:00
int result = 0 ;
2021-01-15 06:50:50 -06:00
u32 count ;
2020-03-05 22:28:19 -06:00
int ret ;
2021-01-15 06:50:50 -06:00
count = clock - > interconnect_count ;
interconnect = clock - > interconnect + count ;
while ( count - - ) {
interconnect - - ;
ret = icc_set_bw ( interconnect - > path , 0 , 0 ) ;
2021-08-04 10:36:23 -05:00
if ( ret ) {
dev_err ( dev , " error %d disabling %s interconnect \n " ,
ret , icc_get_name ( interconnect - > path ) ) ;
/* Try to disable all; record only the first error */
if ( ! result )
result = ret ;
}
2021-01-15 06:50:50 -06:00
}
2020-03-05 22:28:19 -06:00
2021-08-04 10:36:23 -05:00
return result ;
2020-03-05 22:28:19 -06:00
}
/* Turn on IPA clocks, including interconnects */
static int ipa_clock_enable ( struct ipa * ipa )
{
int ret ;
ret = ipa_interconnect_enable ( ipa ) ;
if ( ret )
return ret ;
ret = clk_prepare_enable ( ipa - > clock - > core ) ;
2021-08-04 10:36:23 -05:00
if ( ret ) {
dev_err ( & ipa - > pdev - > dev , " error %d enabling core clock \n " , ret ) ;
( void ) ipa_interconnect_disable ( ipa ) ;
}
2020-03-05 22:28:19 -06:00
return ret ;
}
/* Inverse of ipa_clock_enable() */
net: ipa: have ipa_clock_get() return a value
We currently assume no errors occur when enabling or disabling the
IPA core clock and interconnects. And although this commit exposes
errors that could occur, we generally assume this won't happen in
practice.
This commit changes ipa_clock_get() and ipa_clock_put() so each
returns a value. The values returned are meant to mimic what the
runtime power management functions return, so we can set up error
handling here before we make the switch. Have ipa_clock_get()
increment the reference count even if it returns an error, to match
the behavior of pm_runtime_get().
More details follow.
When taking a reference in ipa_clock_get(), return 0 for the first
reference, 1 for subsequent references, or a negative error code if
an error occurs. Note that if ipa_clock_get() returns an error, we
must not touch hardware; in some cases such errors now cause entire
blocks of code to be skipped.
When dropping a reference in ipa_clock_put(), we return 0 or an
error code. The error would come from ipa_clock_disable(), which
now returns what ipa_interconnect_disable() returns (either 0 or a
negative error code). For now, callers ignore the return value;
if an error occurs, a message will have already been logged, and
little more can actually be done to improve the situation.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-10 14:26:58 -05:00
static int ipa_clock_disable ( struct ipa * ipa )
2020-03-05 22:28:19 -06:00
{
clk_disable_unprepare ( ipa - > clock - > core ) ;
net: ipa: have ipa_clock_get() return a value
We currently assume no errors occur when enabling or disabling the
IPA core clock and interconnects. And although this commit exposes
errors that could occur, we generally assume this won't happen in
practice.
This commit changes ipa_clock_get() and ipa_clock_put() so each
returns a value. The values returned are meant to mimic what the
runtime power management functions return, so we can set up error
handling here before we make the switch. Have ipa_clock_get()
increment the reference count even if it returns an error, to match
the behavior of pm_runtime_get().
More details follow.
When taking a reference in ipa_clock_get(), return 0 for the first
reference, 1 for subsequent references, or a negative error code if
an error occurs. Note that if ipa_clock_get() returns an error, we
must not touch hardware; in some cases such errors now cause entire
blocks of code to be skipped.
When dropping a reference in ipa_clock_put(), we return 0 or an
error code. The error would come from ipa_clock_disable(), which
now returns what ipa_interconnect_disable() returns (either 0 or a
negative error code). For now, callers ignore the return value;
if an error occurs, a message will have already been logged, and
little more can actually be done to improve the situation.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-10 14:26:58 -05:00
return ipa_interconnect_disable ( ipa ) ;
2020-03-05 22:28:19 -06:00
}
2021-08-10 14:27:00 -05:00
static int ipa_runtime_suspend ( struct device * dev )
{
struct ipa * ipa = dev_get_drvdata ( dev ) ;
/* Endpoints aren't usable until setup is complete */
if ( ipa - > setup_complete ) {
__clear_bit ( IPA_POWER_FLAG_RESUMED , ipa - > clock - > flags ) ;
ipa_endpoint_suspend ( ipa ) ;
gsi_suspend ( & ipa - > gsi ) ;
}
return ipa_clock_disable ( ipa ) ;
}
static int ipa_runtime_resume ( struct device * dev )
{
struct ipa * ipa = dev_get_drvdata ( dev ) ;
int ret ;
ret = ipa_clock_enable ( ipa ) ;
if ( WARN_ON ( ret < 0 ) )
return ret ;
/* Endpoints aren't usable until setup is complete */
if ( ipa - > setup_complete ) {
gsi_resume ( & ipa - > gsi ) ;
ipa_endpoint_resume ( ipa ) ;
}
return 0 ;
}
2021-08-10 14:27:01 -05:00
static int ipa_runtime_idle ( struct device * dev )
{
return - EAGAIN ;
}
2020-03-05 22:28:19 -06:00
/* Get an IPA clock reference. If the reference count is non-zero, it is
2021-08-10 14:27:03 -05:00
* incremented and return is immediate . Otherwise the IPA clock is
* enabled .
2020-03-05 22:28:19 -06:00
*/
net: ipa: have ipa_clock_get() return a value
We currently assume no errors occur when enabling or disabling the
IPA core clock and interconnects. And although this commit exposes
errors that could occur, we generally assume this won't happen in
practice.
This commit changes ipa_clock_get() and ipa_clock_put() so each
returns a value. The values returned are meant to mimic what the
runtime power management functions return, so we can set up error
handling here before we make the switch. Have ipa_clock_get()
increment the reference count even if it returns an error, to match
the behavior of pm_runtime_get().
More details follow.
When taking a reference in ipa_clock_get(), return 0 for the first
reference, 1 for subsequent references, or a negative error code if
an error occurs. Note that if ipa_clock_get() returns an error, we
must not touch hardware; in some cases such errors now cause entire
blocks of code to be skipped.
When dropping a reference in ipa_clock_put(), we return 0 or an
error code. The error would come from ipa_clock_disable(), which
now returns what ipa_interconnect_disable() returns (either 0 or a
negative error code). For now, callers ignore the return value;
if an error occurs, a message will have already been logged, and
little more can actually be done to improve the situation.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-10 14:26:58 -05:00
int ipa_clock_get ( struct ipa * ipa )
2020-03-05 22:28:19 -06:00
{
2021-08-10 14:27:03 -05:00
return pm_runtime_get_sync ( & ipa - > pdev - > dev ) ;
2020-03-05 22:28:19 -06:00
}
2020-09-17 12:39:22 -05:00
/* Attempt to remove an IPA clock reference. If this represents the
2021-08-10 14:27:03 -05:00
* last reference , disable the IPA clock .
2020-03-05 22:28:19 -06:00
*/
net: ipa: have ipa_clock_get() return a value
We currently assume no errors occur when enabling or disabling the
IPA core clock and interconnects. And although this commit exposes
errors that could occur, we generally assume this won't happen in
practice.
This commit changes ipa_clock_get() and ipa_clock_put() so each
returns a value. The values returned are meant to mimic what the
runtime power management functions return, so we can set up error
handling here before we make the switch. Have ipa_clock_get()
increment the reference count even if it returns an error, to match
the behavior of pm_runtime_get().
More details follow.
When taking a reference in ipa_clock_get(), return 0 for the first
reference, 1 for subsequent references, or a negative error code if
an error occurs. Note that if ipa_clock_get() returns an error, we
must not touch hardware; in some cases such errors now cause entire
blocks of code to be skipped.
When dropping a reference in ipa_clock_put(), we return 0 or an
error code. The error would come from ipa_clock_disable(), which
now returns what ipa_interconnect_disable() returns (either 0 or a
negative error code). For now, callers ignore the return value;
if an error occurs, a message will have already been logged, and
little more can actually be done to improve the situation.
Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2021-08-10 14:26:58 -05:00
int ipa_clock_put ( struct ipa * ipa )
2020-03-05 22:28:19 -06:00
{
2021-08-10 14:27:03 -05:00
return pm_runtime_put ( & ipa - > pdev - > dev ) ;
2020-03-05 22:28:19 -06:00
}
2020-07-03 16:23:34 -05:00
/* Return the current IPA core clock rate */
u32 ipa_clock_rate ( struct ipa * ipa )
{
return ipa - > clock ? ( u32 ) clk_get_rate ( ipa - > clock - > core ) : 0 ;
}
2021-08-04 10:36:25 -05:00
/**
* ipa_suspend_handler ( ) - Handle the suspend IPA interrupt
* @ ipa : IPA pointer
* @ irq_id : IPA interrupt type ( unused )
*
* If an RX endpoint is suspended , and the IPA has a packet destined for
* that endpoint , the IPA generates a SUSPEND interrupt to inform the AP
* that it should resume the endpoint . If we get one of these interrupts
* we just wake up the system .
*/
static void ipa_suspend_handler ( struct ipa * ipa , enum ipa_irq_id irq_id )
{
/* Just report the event, and let system resume handle the rest.
* More than one endpoint could signal this ; if so , ignore
* all but the first .
*/
2021-08-04 10:36:26 -05:00
if ( ! test_and_set_bit ( IPA_POWER_FLAG_RESUMED , ipa - > clock - > flags ) )
2021-08-04 10:36:25 -05:00
pm_wakeup_dev_event ( & ipa - > pdev - > dev , 0 , true ) ;
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all ( ipa - > interrupt ) ;
}
2021-08-12 14:50:30 -05:00
int ipa_power_setup ( struct ipa * ipa )
2021-08-04 10:36:25 -05:00
{
2021-08-12 14:50:30 -05:00
int ret ;
2021-08-04 10:36:25 -05:00
ipa_interrupt_add ( ipa - > interrupt , IPA_IRQ_TX_SUSPEND ,
ipa_suspend_handler ) ;
2021-08-12 14:50:30 -05:00
ret = device_init_wakeup ( & ipa - > pdev - > dev , true ) ;
if ( ret )
ipa_interrupt_remove ( ipa - > interrupt , IPA_IRQ_TX_SUSPEND ) ;
return ret ;
2021-08-04 10:36:25 -05:00
}
void ipa_power_teardown ( struct ipa * ipa )
{
2021-08-12 14:50:30 -05:00
( void ) device_init_wakeup ( & ipa - > pdev - > dev , false ) ;
2021-08-04 10:36:25 -05:00
ipa_interrupt_remove ( ipa - > interrupt , IPA_IRQ_TX_SUSPEND ) ;
}
2020-03-05 22:28:19 -06:00
/* Initialize IPA clocking */
2020-11-19 16:40:39 -06:00
struct ipa_clock *
ipa_clock_init ( struct device * dev , const struct ipa_clock_data * data )
2020-03-05 22:28:19 -06:00
{
struct ipa_clock * clock ;
struct clk * clk ;
int ret ;
clk = clk_get ( dev , " core " ) ;
if ( IS_ERR ( clk ) ) {
2021-02-12 08:33:59 -06:00
dev_err_probe ( dev , PTR_ERR ( clk ) , " error getting core clock \n " ) ;
2020-03-05 22:28:19 -06:00
return ERR_CAST ( clk ) ;
}
2020-11-19 16:40:41 -06:00
ret = clk_set_rate ( clk , data - > core_clock_rate ) ;
2020-03-05 22:28:19 -06:00
if ( ret ) {
2020-11-19 16:40:41 -06:00
dev_err ( dev , " error %d setting core clock rate to %u \n " ,
ret , data - > core_clock_rate ) ;
2020-03-05 22:28:19 -06:00
goto err_clk_put ;
}
clock = kzalloc ( sizeof ( * clock ) , GFP_KERNEL ) ;
if ( ! clock ) {
ret = - ENOMEM ;
goto err_clk_put ;
}
2021-08-10 14:27:01 -05:00
clock - > dev = dev ;
2020-03-05 22:28:19 -06:00
clock - > core = clk ;
2021-01-15 06:50:50 -06:00
clock - > interconnect_count = data - > interconnect_count ;
2020-03-05 22:28:19 -06:00
2021-01-15 06:50:50 -06:00
ret = ipa_interconnect_init ( clock , dev , data - > interconnect_data ) ;
2020-03-05 22:28:19 -06:00
if ( ret )
goto err_kfree ;
2021-08-10 14:27:01 -05:00
pm_runtime_dont_use_autosuspend ( dev ) ;
pm_runtime_enable ( dev ) ;
2020-03-05 22:28:19 -06:00
return clock ;
err_kfree :
kfree ( clock ) ;
err_clk_put :
clk_put ( clk ) ;
return ERR_PTR ( ret ) ;
}
/* Inverse of ipa_clock_init() */
void ipa_clock_exit ( struct ipa_clock * clock )
{
struct clk * clk = clock - > core ;
2021-08-10 14:27:01 -05:00
pm_runtime_disable ( clock - > dev ) ;
2020-03-05 22:28:19 -06:00
ipa_interconnect_exit ( clock ) ;
kfree ( clock ) ;
clk_put ( clk ) ;
}
2021-08-04 10:36:24 -05:00
const struct dev_pm_ops ipa_pm_ops = {
2021-08-10 14:27:01 -05:00
. suspend = pm_runtime_force_suspend ,
. resume = pm_runtime_force_resume ,
. runtime_suspend = ipa_runtime_suspend ,
. runtime_resume = ipa_runtime_resume ,
. runtime_idle = ipa_runtime_idle ,
2021-08-04 10:36:24 -05:00
} ;