// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 */

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/cpuidle.h>
#include <asm/suspend.h>

#include "common.h"
#include "cpuidle.h"
#include "hardware.h"

static int imx6sx_idle_finish(unsigned long val)
{
	/*
	 * For the Cortex-A7, which has an internal L2 cache, the cache
	 * needs to be flushed before powering down the ARM platform.
	 * Since flushing the L1 cache again here adds very little
	 * overhead compared to conditional code for the L2 cache type,
	 * just call flush_cache_all().
	 */
	flush_cache_all();
	cpu_do_idle();

	return 0;
}
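
/*
 * Both the "WAIT" and "LOW-POWER-IDLE" states enter through here:
 * index 1 only gates the ARM clock, while index 2 additionally powers
 * the ARM core off across cpu_suspend().
 */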
static __cpuidle int imx6sx_enter_wait(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int index)
{
	imx6_set_lpm(WAIT_UNCLOCKED);

	switch (index) {
	case 1:
		cpu_do_idle();
		break;
	case 2:
		imx6_enable_rbc(true);
		imx_gpc_set_arm_power_in_lpm(true);
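		/* Resume entry point once the core is powered back up. */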
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Need to notify there is a cpu pm operation. */
		cpu_pm_enter();
		cpu_cluster_pm_enter();

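		/*
		 * This state has CPUIDLE_FLAG_RCU_IDLE set, so the
		 * RCU-idle section is marked here by the driver itself,
		 * around the actual suspend.
		 */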
		ct_cpuidle_enter();
		cpu_suspend(0, imx6sx_idle_finish);
		ct_cpuidle_exit();

		cpu_cluster_pm_exit();
		cpu_pm_exit();
		imx_gpc_set_arm_power_in_lpm(false);
		imx6_enable_rbc(false);
		break;
	default:
		break;
	}

	imx6_set_lpm(WAIT_CLOCKED);

	return index;
}

static struct cpuidle_driver imx6sx_cpuidle_driver = {
	.name = "imx6sx_cpuidle",
	.owner = THIS_MODULE,
	.states = {
		/* WFI */
		ARM_CPUIDLE_WFI_STATE,
		/* WAIT */
		{
			.exit_latency = 50,
			.target_residency = 75,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = imx6sx_enter_wait,
			.name = "WAIT",
			.desc = "Clock off",
		},
		/* WAIT + ARM power off */
		{
			/*
			 * ARM gating takes 31us * 5 plus 65us for the
			 * RBC to clear; with some margin for SW
			 * execution, set it to 300us.
			 */
			.exit_latency = 300,
			.target_residency = 500,
			.flags = CPUIDLE_FLAG_TIMER_STOP |
				 CPUIDLE_FLAG_RCU_IDLE,
			.enter = imx6sx_enter_wait,
			.name = "LOW-POWER-IDLE",
			.desc = "ARM power off",
		},
	},
	.state_count = 3,
	.safe_state_index = 0,
};
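
/*
 * Configure the default low-power behaviour before registering the
 * driver: internal memory clocks stay on in LPM, L2 memory stays
 * powered, and the RBC is only enabled while the deepest idle state
 * is being entered.
 */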
int __init imx6sx_cpuidle_init(void)
{
	imx6_set_int_mem_clk_lpm(true);
	imx6_enable_rbc(false);
	imx_gpc_set_l2_mem_power_in_lpm(false);
	/*
	 * Set the ARM power up/down timing to the fastest: sw2iso and
	 * sw can be one 32K cycle = 31us, except that the power-up
	 * sw2iso needs to be larger than the LDO ramp-up time.
	 */
	imx_gpc_set_arm_power_up_timing(cpu_is_imx6sx() ? 0xf : 0x2, 1);
	imx_gpc_set_arm_power_down_timing(1, 1);

	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
}