2019-05-28 20:10:04 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2014-11-24 15:28:17 +03:00
/*
* A devfreq driver for NVIDIA Tegra SoCs
*
* Copyright ( c ) 2014 NVIDIA CORPORATION . All rights reserved .
* Copyright ( C ) 2014 Google , Inc
*/
# include <linux/clk.h>
# include <linux/cpufreq.h>
# include <linux/devfreq.h>
# include <linux/interrupt.h>
# include <linux/io.h>
2019-11-05 00:56:00 +03:00
# include <linux/irq.h>
2014-11-24 15:28:17 +03:00
# include <linux/module.h>
2019-11-05 00:56:10 +03:00
# include <linux/of_device.h>
2014-11-24 15:28:17 +03:00
# include <linux/platform_device.h>
# include <linux/pm_opp.h>
# include <linux/reset.h>
2019-11-05 00:56:05 +03:00
# include <linux/workqueue.h>
2014-11-24 15:28:17 +03:00
# include "governor.h"
/* Global ACTMON registers (offsets from the controller base) */
#define ACTMON_GLB_STATUS					0x0
#define ACTMON_GLB_PERIOD_CTRL					0x4

/* Per-device registers (offsets from each device's register window) */
#define ACTMON_DEV_CTRL						0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
#define ACTMON_DEV_CTRL_ENB					BIT(31)

/* DEV_CTRL value that disables a device entirely */
#define ACTMON_DEV_CTRL_STOP					0x00000000

#define ACTMON_DEV_UPPER_WMARK					0x4
#define ACTMON_DEV_LOWER_WMARK					0x8
#define ACTMON_DEV_INIT_AVG					0xc
#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
#define ACTMON_DEV_COUNT_WEIGHT					0x18
#define ACTMON_DEV_AVG_COUNT					0x20
#define ACTMON_DEV_INTR_STATUS					0x24

/* write-1-to-clear mask covering all interrupt status bits */
#define ACTMON_INTR_STATUS_CLEAR				0xffffffff

#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)

/* consecutive-watermark window lengths, in sampling periods */
#define ACTMON_ABOVE_WMARK_WINDOW				1
#define ACTMON_BELOW_WMARK_WINDOW				3
/* kHz added to boost_freq on each consecutive-upper breach */
#define ACTMON_BOOST_FREQ_STEP					16000

/*
 * Activity counter is incremented every 256 memory transactions, and each
 * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
 * 4 * 256 = 1024.
 */
#define ACTMON_COUNT_WEIGHT					0x400

/*
 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 */
#define ACTMON_AVERAGE_WINDOW_LOG2	6
#define ACTMON_SAMPLING_PERIOD		12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND		6 /* 1/10 of % */

#define KHZ			1000

#define KHZ_MAX			(ULONG_MAX / KHZ)

/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO	25
/**
 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 * device
 *
 * Coefficients and thresholds are percentages unless otherwise noted
 */
struct tegra_devfreq_device_config {
	/* register-window offset of this device within the ACTMON block */
	u32 offset;
	/* this device's bit in ACTMON_GLB_STATUS */
	u32 irq_mask;

	/* Factors applied to boost_freq every consecutive watermark breach */
	unsigned int boost_up_coeff;
	unsigned int boost_down_coeff;

	/* Define the watermark bounds when applied to the current avg */
	unsigned int boost_up_threshold;
	unsigned int boost_down_threshold;

	/*
	 * Threshold of activity (cycles translated to kHz) below which the
	 * CPU frequency isn't to be taken into account. This is to avoid
	 * increasing the EMC frequency when the CPU is very busy but not
	 * accessing the bus often.
	 */
	u32 avg_dependency_threshold;
};
enum tegra_actmon_device {
MCALL = 0 ,
MCCPU ,
} ;
2019-11-05 00:56:09 +03:00
/* Per-device tuning; indexed by enum tegra_actmon_device */
static const struct tegra_devfreq_device_config actmon_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 60,
		.boost_down_threshold = 40,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};
/**
 * struct tegra_devfreq_device - state specific to an ACTMON device
 *
 * Frequencies are in kHz.
 */
struct tegra_devfreq_device {
	const struct tegra_devfreq_device_config *config;

	/* base of this device's register window (tegra->regs + config->offset) */
	void __iomem *regs;

	/* Average event count sampled in the last interrupt */
	u32 avg_count;

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
};
/* Driver-wide state shared by the governor, the ISR and the notifiers */
struct tegra_devfreq {
	struct devfreq		*devfreq;

	struct reset_control	*reset;
	struct clk		*clock;		/* ACTMON interface clock */
	void __iomem		*regs;

	struct clk		*emc_clock;	/* the clock being scaled */
	unsigned long		max_freq;	/* kHz */
	unsigned long		cur_freq;	/* kHz */

	/* reprograms watermarks when the EMC rate changes */
	struct notifier_block	clk_rate_change_nb;

	/* deferred devfreq update triggered by CPU frequency transitions */
	struct delayed_work	cpufreq_update_work;
	struct notifier_block	cpu_rate_change_nb;

	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];

	unsigned int		irq;

	/* governor started; see tegra_actmon_start()/stop() */
	bool			started;
};
struct tegra_actmon_emc_ratio {
unsigned long cpu_freq ;
unsigned long emc_freq ;
} ;
2019-11-05 00:56:09 +03:00
/* Sorted by cpu_freq, highest first; consumed by actmon_cpu_to_emc_rate() */
static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000,    KHZ_MAX },
	{ 1200000,     750000 },
	{ 1100000,     600000 },
	{ 1000000,     500000 },
	{  800000,     375000 },
	{  500000,     200000 },
	{  250000,     100000 },
};
2015-03-17 12:36:12 +03:00
/* Read a 32-bit register from the global ACTMON register space. */
static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
	void __iomem *addr = tegra->regs + offset;

	return readl_relaxed(addr);
}
/* Write a 32-bit register in the global ACTMON register space. */
static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
	void __iomem *addr = tegra->regs + offset;

	writel_relaxed(val, addr);
}
/* Read a 32-bit register from one ACTMON device's register window. */
static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
	void __iomem *addr = dev->regs + offset;

	return readl_relaxed(addr);
}
/* Write a 32-bit register in one ACTMON device's register window. */
static void device_writel(struct tegra_devfreq_device *dev, u32 val,
			  u32 offset)
{
	void __iomem *addr = dev->regs + offset;

	writel_relaxed(val, addr);
}
2019-11-05 00:56:15 +03:00
static unsigned long do_percent ( unsigned long long val , unsigned int pct )
2014-11-24 15:28:17 +03:00
{
2019-11-05 00:56:15 +03:00
val = val * pct ;
do_div ( val , 100 ) ;
/*
* High freq + high boosting percent + large polling interval are
* resulting in integer overflow when watermarks are calculated .
*/
return min_t ( u64 , val , U32_MAX ) ;
2014-11-24 15:28:17 +03:00
}
2015-03-17 12:36:12 +03:00
/*
 * Re-centre the average-count watermarks around the last sampled
 * avg_count, +/- a fixed band derived from the maximum frequency.
 */
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
					   struct tegra_devfreq_device *dev)
{
	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
	u32 avg;

	/* clamp so that (avg + band) cannot wrap the u32 upper watermark */
	avg = min(dev->avg_count, U32_MAX - band);
	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);

	/* clamp so that the lower watermark saturates at zero */
	avg = max(dev->avg_count, band);
	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
}
static void tegra_devfreq_update_wmark ( struct tegra_devfreq * tegra ,
struct tegra_devfreq_device * dev )
{
2019-11-05 00:56:15 +03:00
u32 val = tegra - > cur_freq * tegra - > devfreq - > profile - > polling_ms ;
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
device_writel ( dev , do_percent ( val , dev - > config - > boost_up_threshold ) ,
ACTMON_DEV_UPPER_WMARK ) ;
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
device_writel ( dev , do_percent ( val , dev - > config - > boost_down_threshold ) ,
ACTMON_DEV_LOWER_WMARK ) ;
2014-11-24 15:28:17 +03:00
}
2015-03-17 12:36:12 +03:00
/*
 * Handle one device's ACTMON interrupt: sample the new average count,
 * re-centre the average watermarks and adjust boost_freq according to
 * which consecutive watermark was breached.
 */
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
{
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		/* re-arm the "lower" direction now that boost is rising */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		/* boost saturated: further "upper" interrupts are useless */
		if (dev->boost_freq >= tegra->max_freq) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

			dev->boost_freq = tegra->max_freq;
		}
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		/* re-arm the "upper" direction now that boost is decaying */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		/* boost died out: further "lower" interrupts are useless */
		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

			dev->boost_freq = 0;
		}
	}

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	/* acknowledge all of this device's pending interrupts */
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
}
static unsigned long actmon_cpu_to_emc_rate ( struct tegra_devfreq * tegra ,
unsigned long cpu_freq )
{
unsigned int i ;
2019-11-05 00:56:09 +03:00
const struct tegra_actmon_emc_ratio * ratio = actmon_emc_ratios ;
2014-11-24 15:28:17 +03:00
for ( i = 0 ; i < ARRAY_SIZE ( actmon_emc_ratios ) ; i + + , ratio + + ) {
if ( cpu_freq > = ratio - > cpu_freq ) {
if ( ratio - > emc_freq > = tegra - > max_freq )
return tegra - > max_freq ;
else
return ratio - > emc_freq ;
}
}
return 0 ;
}
2019-11-05 00:56:05 +03:00
static unsigned long actmon_device_target_freq ( struct tegra_devfreq * tegra ,
struct tegra_devfreq_device * dev )
{
unsigned int avg_sustain_coef ;
unsigned long target_freq ;
2019-11-05 00:56:15 +03:00
target_freq = dev - > avg_count / tegra - > devfreq - > profile - > polling_ms ;
2019-11-05 00:56:05 +03:00
avg_sustain_coef = 100 * 100 / dev - > config - > boost_up_threshold ;
target_freq = do_percent ( target_freq , avg_sustain_coef ) ;
return target_freq ;
}
2014-11-24 15:28:17 +03:00
static void actmon_update_target ( struct tegra_devfreq * tegra ,
struct tegra_devfreq_device * dev )
{
unsigned long cpu_freq = 0 ;
unsigned long static_cpu_emc_freq = 0 ;
2019-11-05 00:56:13 +03:00
dev - > target_freq = actmon_device_target_freq ( tegra , dev ) ;
if ( dev - > config - > avg_dependency_threshold & &
dev - > config - > avg_dependency_threshold < = dev - > target_freq ) {
2019-11-05 00:56:05 +03:00
cpu_freq = cpufreq_quick_get ( 0 ) ;
2014-11-24 15:28:17 +03:00
static_cpu_emc_freq = actmon_cpu_to_emc_rate ( tegra , cpu_freq ) ;
2019-11-05 00:56:13 +03:00
dev - > target_freq + = dev - > boost_freq ;
2014-11-24 15:28:17 +03:00
dev - > target_freq = max ( dev - > target_freq , static_cpu_emc_freq ) ;
2019-11-05 00:56:13 +03:00
} else {
dev - > target_freq + = dev - > boost_freq ;
}
2014-11-24 15:28:17 +03:00
}
static irqreturn_t actmon_thread_isr ( int irq , void * data )
{
struct tegra_devfreq * tegra = data ;
2019-05-02 02:38:05 +03:00
bool handled = false ;
unsigned int i ;
u32 val ;
2014-11-24 15:28:17 +03:00
mutex_lock ( & tegra - > devfreq - > lock ) ;
2019-05-02 02:38:05 +03:00
val = actmon_readl ( tegra , ACTMON_GLB_STATUS ) ;
for ( i = 0 ; i < ARRAY_SIZE ( tegra - > devices ) ; i + + ) {
if ( val & tegra - > devices [ i ] . config - > irq_mask ) {
actmon_isr_device ( tegra , tegra - > devices + i ) ;
handled = true ;
}
}
if ( handled )
update_devfreq ( tegra - > devfreq ) ;
2014-11-24 15:28:17 +03:00
mutex_unlock ( & tegra - > devfreq - > lock ) ;
2019-05-02 02:38:05 +03:00
return handled ? IRQ_HANDLED : IRQ_NONE ;
2014-11-24 15:28:17 +03:00
}
2019-11-05 00:56:05 +03:00
static int tegra_actmon_clk_notify_cb ( struct notifier_block * nb ,
unsigned long action , void * ptr )
2014-11-24 15:28:17 +03:00
{
struct clk_notifier_data * data = ptr ;
2015-03-17 12:36:12 +03:00
struct tegra_devfreq * tegra ;
struct tegra_devfreq_device * dev ;
2014-11-24 15:28:17 +03:00
unsigned int i ;
2015-03-17 12:36:12 +03:00
if ( action ! = POST_RATE_CHANGE )
return NOTIFY_OK ;
2014-11-24 15:28:17 +03:00
2019-11-05 00:56:05 +03:00
tegra = container_of ( nb , struct tegra_devfreq , clk_rate_change_nb ) ;
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
tegra - > cur_freq = data - > new_rate / KHZ ;
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
for ( i = 0 ; i < ARRAY_SIZE ( tegra - > devices ) ; i + + ) {
dev = & tegra - > devices [ i ] ;
tegra_devfreq_update_wmark ( tegra , dev ) ;
}
2014-11-24 15:28:17 +03:00
return NOTIFY_OK ;
}
2019-11-05 00:56:05 +03:00
/* Deferred devfreq re-evaluation queued from the cpufreq notifier. */
static void tegra_actmon_delayed_update(struct work_struct *work)
{
	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
						   cpufreq_update_work.work);

	mutex_lock(&tegra->devfreq->lock);
	update_devfreq(tegra->devfreq);
	mutex_unlock(&tegra->devfreq->lock);
}
/*
 * Return the EMC frequency floor (kHz) that the given CPU frequency
 * imposes, or 0 when it should be ignored: either the MCCPU device's
 * activity is below its dependency threshold, or the device's own
 * target (including boost) already covers the floor.
 */
static unsigned long
tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
				  unsigned int cpu_freq)
{
	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
	unsigned long static_cpu_emc_freq, dev_freq;

	dev_freq = actmon_device_target_freq(tegra, actmon_dev);

	/* check whether CPU's freq is taken into account at all */
	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
		return 0;

	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);

	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
		return 0;

	return static_cpu_emc_freq;
}
/*
 * CPUFreq transition notifier: when a CPU frequency change alters the
 * EMC floor the CPU imposes, schedule a deferred devfreq update rather
 * than blocking the CPUFreq core.
 */
static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 * next interrupt.
		 */
		if (old == new)
			return NOTIFY_OK;
	}

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 * warning splat.
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);

	return NOTIFY_OK;
}
2015-03-17 12:36:12 +03:00
/*
 * Program one ACTMON device: seed the running average from the current
 * EMC rate, set all watermarks, then enable periodic sampling with the
 * configured averaging and consecutive-watermark windows.
 */
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val = 0;

	/* reset boosting on governor's restart */
	dev->boost_freq = 0;

	dev->target_freq = tegra->cur_freq;

	/* initial average = cycles per sampling period at the current rate */
	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
	/* drop any stale interrupts before enabling */
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	/* averaging window is 2^(K_VAL + 1) samples */
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	device_writel(dev, val, ACTMON_DEV_CTRL);
}
2019-11-05 00:56:05 +03:00
static void tegra_actmon_stop_devices ( struct tegra_devfreq * tegra )
{
struct tegra_devfreq_device * dev = tegra - > devices ;
unsigned int i ;
for ( i = 0 ; i < ARRAY_SIZE ( tegra - > devices ) ; i + + , dev + + ) {
device_writel ( dev , ACTMON_DEV_CTRL_STOP , ACTMON_DEV_CTRL ) ;
device_writel ( dev , ACTMON_INTR_STATUS_CLEAR ,
ACTMON_DEV_INTR_STATUS ) ;
}
}
2019-11-05 00:56:15 +03:00
/*
 * (Re-)program and start all ACTMON devices and register the clk and
 * cpufreq notifiers. No-op unless polling is enabled and the governor
 * has been started. Returns 0 or a negative error code.
 */
static int tegra_actmon_resume(struct tegra_devfreq *tegra)
{
	unsigned int i;
	int err;

	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return 0;

	/* hardware counts the period as (value + 1) milliseconds */
	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance to the actual clock rate
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");
		return err;
	}

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We are estimating CPU's memory bandwidth requirement based on
	 * amount of memory accesses and system's load, judging by CPU's
	 * frequency. We also don't want to receive events about CPU's
	 * frequency transaction when governor is stopped, hence notifier
	 * is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);
		goto err_stop;
	}

	enable_irq(tegra->irq);

	return 0;

err_stop:
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);

	return err;
}
2019-11-05 00:56:15 +03:00
static int tegra_actmon_start ( struct tegra_devfreq * tegra )
2019-05-02 02:38:11 +03:00
{
2019-11-05 00:56:15 +03:00
int ret = 0 ;
if ( ! tegra - > started ) {
tegra - > started = true ;
ret = tegra_actmon_resume ( tegra ) ;
if ( ret )
tegra - > started = false ;
}
return ret ;
}
/*
 * Quiesce monitoring: mask the IRQ, drop the notifiers and stop the
 * devices, but leave tegra->started set so tegra_actmon_resume() can
 * bring monitoring back (used when the polling interval changes).
 */
static void tegra_actmon_pause(struct tegra_devfreq *tegra)
{
	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return;

	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	/* the notifier may have queued a deferred update; flush it */
	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
}
2019-11-05 00:56:15 +03:00
/* Fully stop monitoring and clear the started flag. */
static void tegra_actmon_stop(struct tegra_devfreq *tegra)
{
	tegra_actmon_pause(tegra);
	tegra->started = false;
}
2014-11-24 15:28:17 +03:00
/*
 * devfreq target callback: pin the EMC clock's minimum rate to the
 * recommended OPP (OPPs are registered in kHz, the clk API takes Hz)
 * and then let the clk core pick the actual rate via clk_set_rate(0).
 */
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
	struct devfreq *devfreq = tegra->devfreq;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}
	rate = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	/* rate is in kHz (OPP units); clk_set_min_rate() expects Hz */
	err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
	if (err)
		return err;

	err = clk_set_rate(tegra->emc_clock, 0);
	if (err)
		goto restore_min_rate;

	return 0;

restore_min_rate:
	/*
	 * previous_freq is kept by the devfreq core in the same kHz units
	 * as *freq, so it must be scaled to Hz here as well; without the
	 * scaling the restored floor would be ~1000x too low.
	 */
	clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq * KHZ);

	return err;
}
static int tegra_devfreq_get_dev_status ( struct device * dev ,
struct devfreq_dev_status * stat )
{
2015-03-17 12:36:12 +03:00
struct tegra_devfreq * tegra = dev_get_drvdata ( dev ) ;
2014-11-24 15:28:17 +03:00
struct tegra_devfreq_device * actmon_dev ;
2019-05-02 02:38:08 +03:00
unsigned long cur_freq ;
2014-11-24 15:28:17 +03:00
2019-05-02 02:38:08 +03:00
cur_freq = READ_ONCE ( tegra - > cur_freq ) ;
2014-11-24 15:28:17 +03:00
/* To be used by the tegra governor */
stat - > private_data = tegra ;
/* The below are to be used by the other governors */
2019-11-05 00:56:04 +03:00
stat - > current_frequency = cur_freq ;
2014-11-24 15:28:17 +03:00
actmon_dev = & tegra - > devices [ MCALL ] ;
/* Number of cycles spent on memory access */
2015-03-17 12:36:12 +03:00
stat - > busy_time = device_readl ( actmon_dev , ACTMON_DEV_AVG_COUNT ) ;
2014-11-24 15:28:17 +03:00
/* The bus can be considered to be saturated way before 100% */
stat - > busy_time * = 100 / BUS_SATURATION_RATIO ;
/* Number of cycles in a sampling period */
2019-11-05 00:56:15 +03:00
stat - > total_time = tegra - > devfreq - > profile - > polling_ms * cur_freq ;
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
stat - > busy_time = min ( stat - > busy_time , stat - > total_time ) ;
2014-11-24 15:28:17 +03:00
return 0 ;
}
2015-03-17 12:36:12 +03:00
/*
 * initial_freq is filled in at probe time from the live EMC rate;
 * polling is bounded to 256 ms by the ACTMON hardware (enforced in
 * the governor's DEVFREQ_GOV_UPDATE_INTERVAL handler).
 */
static struct devfreq_dev_profile tegra_devfreq_profile = {
	.polling_ms	= ACTMON_SAMPLING_PERIOD,
	.target		= tegra_devfreq_target,
	.get_dev_status	= tegra_devfreq_get_dev_status,
};
static int tegra_governor_get_target ( struct devfreq * devfreq ,
unsigned long * freq )
2014-11-24 15:28:17 +03:00
{
2015-08-18 07:47:41 +03:00
struct devfreq_dev_status * stat ;
2014-11-24 15:28:17 +03:00
struct tegra_devfreq * tegra ;
struct tegra_devfreq_device * dev ;
unsigned long target_freq = 0 ;
unsigned int i ;
int err ;
2015-08-18 07:47:41 +03:00
err = devfreq_update_stats ( devfreq ) ;
2014-11-24 15:28:17 +03:00
if ( err )
return err ;
2015-08-18 07:47:41 +03:00
stat = & devfreq - > last_status ;
tegra = stat - > private_data ;
2014-11-24 15:28:17 +03:00
for ( i = 0 ; i < ARRAY_SIZE ( tegra - > devices ) ; i + + ) {
dev = & tegra - > devices [ i ] ;
actmon_update_target ( tegra , dev ) ;
target_freq = max ( target_freq , dev - > target_freq ) ;
}
2019-11-05 00:56:04 +03:00
* freq = target_freq ;
2014-11-24 15:28:17 +03:00
return 0 ;
}
2015-03-17 12:36:12 +03:00
static int tegra_governor_event_handler ( struct devfreq * devfreq ,
unsigned int event , void * data )
2014-11-24 15:28:17 +03:00
{
2019-02-16 18:18:26 +03:00
struct tegra_devfreq * tegra = dev_get_drvdata ( devfreq - > dev . parent ) ;
2019-11-05 00:56:15 +03:00
unsigned int * new_delay = data ;
2019-11-05 00:56:05 +03:00
int ret = 0 ;
2015-03-17 12:36:12 +03:00
2019-11-05 00:56:00 +03:00
/*
* Couple devfreq - device with the governor early because it is
* needed at the moment of governor ' s start ( used by ISR ) .
*/
tegra - > devfreq = devfreq ;
2015-03-17 12:36:12 +03:00
switch ( event ) {
case DEVFREQ_GOV_START :
devfreq_monitor_start ( devfreq ) ;
2019-11-05 00:56:05 +03:00
ret = tegra_actmon_start ( tegra ) ;
2015-03-17 12:36:12 +03:00
break ;
case DEVFREQ_GOV_STOP :
2019-05-02 02:38:11 +03:00
tegra_actmon_stop ( tegra ) ;
2015-03-17 12:36:12 +03:00
devfreq_monitor_stop ( devfreq ) ;
break ;
2020-01-29 07:24:18 +03:00
case DEVFREQ_GOV_UPDATE_INTERVAL :
2019-11-05 00:56:15 +03:00
/*
* ACTMON hardware supports up to 256 milliseconds for the
* sampling period .
*/
if ( * new_delay > 256 ) {
ret = - EINVAL ;
break ;
}
tegra_actmon_pause ( tegra ) ;
2020-01-29 07:24:18 +03:00
devfreq_update_interval ( devfreq , new_delay ) ;
2019-11-05 00:56:15 +03:00
ret = tegra_actmon_resume ( tegra ) ;
break ;
2015-03-17 12:36:12 +03:00
case DEVFREQ_GOV_SUSPEND :
2019-05-02 02:38:11 +03:00
tegra_actmon_stop ( tegra ) ;
2015-03-17 12:36:12 +03:00
devfreq_monitor_suspend ( devfreq ) ;
break ;
case DEVFREQ_GOV_RESUME :
devfreq_monitor_resume ( devfreq ) ;
2019-11-05 00:56:05 +03:00
ret = tegra_actmon_start ( tegra ) ;
2015-03-17 12:36:12 +03:00
break ;
}
2019-11-05 00:56:05 +03:00
return ret ;
2014-11-24 15:28:17 +03:00
}
/*
 * Custom immutable governor; IRQ_DRIVEN because target re-evaluation is
 * triggered from the ACTMON interrupt rather than by core polling.
 */
static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
	       | DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
};
/*
 * Probe: map registers, acquire clocks and the reset line, enumerate
 * the EMC rates into an OPP table (kHz), then register the governor
 * and the devfreq device. Error paths unwind in reverse order.
 */
static int tegra_devfreq_probe(struct platform_device *pdev)
{
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;
	unsigned int i;
	long rate;
	int err;

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);
	}

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);
	}

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock)) {
		dev_err(&pdev->dev, "Failed to get emc clock\n");
		return PTR_ERR(tegra->emc_clock);
	}

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	tegra->irq = err;

	/* keep the IRQ masked until the governor starts (enable_irq()) */
	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
	if (err) {
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(tegra->clock);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to prepare and enable ACTMON clock\n");
		return err;
	}

	err = reset_control_reset(tegra->reset);
	if (err) {
		dev_err(&pdev->dev, "Failed to reset hardware: %d\n", err);
		goto disable_clk;
	}

	/* highest supported EMC rate defines max_freq (kHz) */
	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
	if (rate < 0) {
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
		err = rate;
		goto disable_clk;
	}

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
		dev = tegra->devices + i;
		dev->config = actmon_device_configs + i;
		dev->regs = tegra->regs + dev->config->offset;
	}

	/*
	 * Walk every supported EMC rate by rounding up from rate + 1,
	 * registering one OPP (in kHz) per distinct rate.
	 */
	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
		rate = clk_round_rate(tegra->emc_clock, rate);

		if (rate < 0) {
			dev_err(&pdev->dev,
				"Failed to round clock rate: %ld\n", rate);
			err = rate;
			goto remove_opps;
		}

		err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
		if (err) {
			dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
			goto remove_opps;
		}
	}

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devfreq_add_governor(&tegra_devfreq_governor);
	if (err) {
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
		goto remove_opps;
	}

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
	tegra_devfreq_profile.initial_freq /= KHZ;

	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
				     "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		err = PTR_ERR(devfreq);
		goto remove_governor;
	}

	return 0;

remove_governor:
	devfreq_remove_governor(&tegra_devfreq_governor);

remove_opps:
	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
disable_clk:
	clk_disable_unprepare(tegra->clock);

	return err;
}
/* Remove: tear down in reverse order of probe. */
static int tegra_devfreq_remove(struct platform_device *pdev)
{
	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);

	devfreq_remove_device(tegra->devfreq);
	devfreq_remove_governor(&tegra_devfreq_governor);

	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	/* put the hardware back into its reset-default state */
	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);

	return 0;
}
2015-03-17 12:36:12 +03:00
/* Devicetree compatibles served by this driver */
static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon" },
	{ .compatible = "nvidia,tegra124-actmon" },
	{ },
};

MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
2014-11-24 15:28:17 +03:00
static struct platform_driver tegra_devfreq_driver = {
	.probe = tegra_devfreq_probe,
	.remove = tegra_devfreq_remove,
	.driver = {
		.name = "tegra-devfreq",
		.of_match_table = tegra_devfreq_of_match,
	},
};

module_platform_driver(tegra_devfreq_driver);
2014-11-24 15:28:17 +03:00
2015-03-17 12:36:12 +03:00
MODULE_LICENSE ( " GPL v2 " ) ;
2014-11-24 15:28:17 +03:00
MODULE_DESCRIPTION ( " Tegra devfreq driver " ) ;
MODULE_AUTHOR ( " Tomeu Vizoso <tomeu.vizoso@collabora.com> " ) ;