From 0db1803e4ee459fd261915a2f1b2c39bb34767eb Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Thu, 3 Mar 2011 17:36:52 +0530
Subject: [PATCH 01/24] ARM: OMAP4: Use WARN_ON() instead of BUG_ON() with
 graceful exit

OMAP4 L2X0 initialisation code uses BUG_ON() for the ioremap() failure
scenarios. Use WARN_ON() instead and allow graceful function exits.
This was suggested by Kevin Hilman during OMAP4 PM code review.

Signed-off-by: Santosh Shilimkar
Acked-by: Jean Pihet
Reviewed-by: Kevin Hilman
Tested-by: Vishwanath BS
Signed-off-by: Kevin Hilman
---
 arch/arm/mach-omap2/omap4-common.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index beecfdd56ea3..21d4821c9612 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -72,7 +72,8 @@ static int __init omap_l2_cache_init(void)
 
 	/* Static mapping, never released */
 	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
-	BUG_ON(!l2cache_base);
+	if (WARN_ON(!l2cache_base))
+		return -ENOMEM;
 
 	/*
	 * 16-way associativity, parity disabled

From 02afe8a7f23d562cec76743ae34c4735d2819345 Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Thu, 3 Mar 2011 18:03:25 +0530
Subject: [PATCH 02/24] ARM: OMAP4: Export omap4_get_base*() rather than
 global address pointers

This patch exports APIs to get the base addresses for the GIC distributor,
CPU interface, SCU and PL310 L2 Cache, which are used in OMAP4 PM code.
This was suggested by Kevin Hilman during OMAP4 PM code review.

Signed-off-by: Santosh Shilimkar
Acked-by: Jean Pihet
Reviewed-by: Kevin Hilman
Tested-by: Vishwanath BS
Signed-off-by: Kevin Hilman
---
 arch/arm/mach-omap2/common.h       | 11 ++++++++++-
 arch/arm/mach-omap2/omap-smp.c     |  5 +++++
 arch/arm/mach-omap2/omap4-common.c |  7 ++++++-
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 012bac7d56a5..ca04152a9199 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -168,7 +168,16 @@ void omap3_intc_handle_irq(struct pt_regs *regs);
 #endif
 
 #ifdef CONFIG_CACHE_L2X0
-extern void __iomem *l2cache_base;
+extern void __iomem *omap4_get_l2cache_base(void);
+#endif
+
+#ifdef CONFIG_SMP
+extern void __iomem *omap4_get_scu_base(void);
+#else
+static inline void __iomem *omap4_get_scu_base(void)
+{
+	return NULL;
+}
 #endif
 
 extern void __init gic_init_irq(void);
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index e99bc6cd4714..74e90b40a0c7 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -32,6 +32,11 @@ static void __iomem *scu_base;
 
 static DEFINE_SPINLOCK(boot_lock);
 
+void __iomem *omap4_get_scu_base(void)
+{
+	return scu_base;
+}
+
 void __cpuinit platform_secondary_init(unsigned int cpu)
 {
 	/*
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 21d4821c9612..4a3d2898543b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -26,7 +26,7 @@
 #include "common.h"
 
 #ifdef CONFIG_CACHE_L2X0
-void __iomem *l2cache_base;
+static void __iomem *l2cache_base;
 #endif
 
 void __init gic_init_irq(void)
@@ -47,6 +47,11 @@ void __init gic_init_irq(void)
 
 #ifdef CONFIG_CACHE_L2X0
 
+void __iomem *omap4_get_l2cache_base(void)
+{
+	return l2cache_base;
+}
+
 static void omap4_l2x0_disable(void)
 {
 	/* Disable PL310 L2 Cache controller */

From 501f0c751de06d8484b4279131c26f58bd49a69d Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Sat, 1 Jan 2011 19:56:04 +0530
Subject: [PATCH 03/24] ARM: OMAP4: PM: Add SAR RAM support

This patch adds SAR RAM support on OMAP4430. SAR RAM is used to save
and restore the HW context in low power modes.

Signed-off-by: Santosh Shilimkar
Acked-by: Jean Pihet
Reviewed-by: Kevin Hilman
Tested-by: Vishwanath BS
Signed-off-by: Kevin Hilman
---
 arch/arm/mach-omap2/common.h               |  1 +
 arch/arm/mach-omap2/omap4-common.c         | 30 ++++++++++++++++++++++
 arch/arm/mach-omap2/omap4-sar-layout.h     | 22 ++++++++++++++++
 arch/arm/plat-omap/include/plat/omap44xx.h |  1 +
 4 files changed, 54 insertions(+)
 create mode 100644 arch/arm/mach-omap2/omap4-sar-layout.h

diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index ca04152a9199..7ebcb6a9b73e 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -182,6 +182,7 @@ static inline void __iomem *omap4_get_scu_base(void)
 
 extern void __init gic_init_irq(void);
 extern void omap_smc1(u32 fn, u32 arg);
+extern void __iomem *omap4_get_sar_ram_base(void);
 
 #ifdef CONFIG_SMP
 /* Needed for secondary core boot */
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 4a3d2898543b..2489f5b8b983 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -24,11 +24,14 @@
 #include
 
 #include "common.h"
+#include "omap4-sar-layout.h"
 
 #ifdef CONFIG_CACHE_L2X0
 static void __iomem *l2cache_base;
 #endif
 
+static void __iomem *sar_ram_base;
+
 void __init gic_init_irq(void)
 {
 	void __iomem *omap_irq_base;
@@ -118,3 +121,30 @@ static int __init omap_l2_cache_init(void)
 }
 early_initcall(omap_l2_cache_init);
 #endif
+
+void __iomem *omap4_get_sar_ram_base(void)
+{
+	return sar_ram_base;
+}
+
+/*
+ * SAR RAM is used to save and restore the HW
+ * context in low power modes
+ */
+static int __init omap4_sar_ram_init(void)
+{
+	/*
+	 * To avoid code running on other OMAPs in
+	 * multi-omap builds
+	 */
+	if (!cpu_is_omap44xx())
+		return -ENOMEM;
+
+	/* Static mapping, never released */
+	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
+	if (WARN_ON(!sar_ram_base))
+		return -ENOMEM;
+
+	return 0;
+}
+early_initcall(omap4_sar_ram_init);
diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
new file mode 100644
index 000000000000..7781ea4dacbc
--- /dev/null
+++ b/arch/arm/mach-omap2/omap4-sar-layout.h
@@ -0,0 +1,22 @@
+/*
+ * omap4-sar-layout.h: OMAP4 SAR RAM layout header file
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
+
+/*
+ * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
+ */
+#define SAR_BANK1_OFFSET		0x0000
+#define SAR_BANK2_OFFSET		0x1000
+#define SAR_BANK3_OFFSET		0x2000
+#define SAR_BANK4_OFFSET		0x3000
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h
index ea2b8a6306e7..c0d478e55c84 100644
--- a/arch/arm/plat-omap/include/plat/omap44xx.h
+++ b/arch/arm/plat-omap/include/plat/omap44xx.h
@@ -45,6 +45,7 @@
 #define OMAP44XX_WKUPGEN_BASE		0x48281000
 #define OMAP44XX_MCPDM_BASE		0x40132000
 #define OMAP44XX_MCPDM_L3_BASE		0x49032000
+#define OMAP44XX_SAR_RAM_BASE		0x4a326000
 
 #define OMAP44XX_MAILBOX_BASE		(L4_44XX_BASE + 0xF4000)
 #define OMAP44XX_HSUSB_OTG_BASE		(L4_44XX_BASE + 0xAB000)

From 12f27826bdaf56b01cbdfc8bdeb577ebc106dee3 Mon Sep 17 00:00:00 2001
From: Santosh Shilimkar
Date: Tue, 8 Mar 2011 18:24:30 +0530
Subject: [PATCH 04/24] ARM: OMAP4: PM: Keep static dep between MPUSS-EMIF and
 MPUSS-L3/L4 and DUCATI-L3

As per the OMAP4430 TRM, the dynamic dependency between the MPUSS -> EMIF,
MPUSS -> L4PER/L3_* and DUCATI -> L3_* clockdomains is enabled by default.
Refer to the CM_MPU_DYNAMICDEP register description for details. But these
dynamic dependencies don't work as expected. The hardware recommendation
is to enable static dependencies for the above clockdomains. Without this,
the system locks up or randomly crashes.

Signed-off-by: Rajendra Nayak
Signed-off-by: Santosh Shilimkar
Acked-by: Paul Walmsley
Acked-by: Jean Pihet
Reviewed-by: Kevin Hilman
Tested-by: Vishwanath BS
Signed-off-by: Kevin Hilman
---
 arch/arm/mach-omap2/pm44xx.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 8edb015f5618..715035d0512a 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -99,6 +99,8 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
 static int __init omap4_pm_init(void)
 {
 	int ret;
+	struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
+	struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
 
 	if (!cpu_is_omap44xx())
 		return -ENODEV;
@@ -111,6 +113,34 @@ static int __init omap4_pm_init(void)
 		goto err2;
 	}
 
+	/*
+	 * The dynamic dependency between MPUSS -> MEMIF and
+	 * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
+	 * expected. The hardware recommendation is to enable static
+	 * dependencies for these to avoid system lock ups or random crashes.
+ */ + mpuss_clkdm = clkdm_lookup("mpuss_clkdm"); + emif_clkdm = clkdm_lookup("l3_emif_clkdm"); + l3_1_clkdm = clkdm_lookup("l3_1_clkdm"); + l3_2_clkdm = clkdm_lookup("l3_2_clkdm"); + l4_per_clkdm = clkdm_lookup("l4_per_clkdm"); + ducati_clkdm = clkdm_lookup("ducati_clkdm"); + if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || + (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm)) + goto err2; + + ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm); + ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm); + ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm); + ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm); + ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm); + ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm); + if (ret) { + pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 " + "wakeup dependency\n"); + goto err2; + } + #ifdef CONFIG_SUSPEND suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ From 361b02f3538bc5603a426ed3bb04129a8d7b9a67 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Fri, 11 Mar 2011 16:13:09 +0530 Subject: [PATCH 05/24] ARM: OMAP4: PM: Avoid omap4_pm_init() on OMAP4430 ES1.0 On OMAP4430 ES1.0, Power Management features are not supported. Avoid omap4_pm_init() on ES1.0 silicon so that we can continue to use same kernel binary to boot on all OMAP4 silicons. The ES1.0 boot failure with OMAP4 PM series was because of the clockdomain initialisation code. Hardware supervised clockdomain mode isn't functional for all clockdomains on OMAP4430 ES1.0 silicon so avoid the same. Signed-off-by: Santosh Shilimkar Reported-by: Kevin Hilman Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/pm44xx.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 715035d0512a..35d392abcaa4 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -105,6 +105,11 @@ static int __init omap4_pm_init(void) if (!cpu_is_omap44xx()) return -ENODEV; + if (omap_rev() == OMAP4430_REV_ES1_0) { + WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); + return -ENODEV; + } + pr_err("Power Management for TI OMAP4.\n"); ret = pwrdm_for_each(pwrdms_setup, NULL); From 3c50729b3fa1cd8ca1f347e6caf1081204cf1a7c Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 5 Jan 2011 22:03:17 +0530 Subject: [PATCH 06/24] ARM: OMAP4: PM: Initialise all the clockdomains to supported states Initialise hardware supervised mode for all clockdomains if it's supported. Initiate sleep transition for other clockdomains, if they are not being used. Signed-off-by: Santosh Shilimkar Signed-off-by: Rajendra Nayak Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/pm44xx.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 35d392abcaa4..c34139dc8d8c 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -17,6 +17,7 @@ #include #include "common.h" +#include "clockdomain.h" #include "powerdomain.h" struct power_state { @@ -73,6 +74,22 @@ static const struct platform_suspend_ops omap_pm_ops = { }; #endif /* CONFIG_SUSPEND */ +/* + * Enable hardware supervised mode for all clockdomains if it's + * supported. 
Initiate sleep transition for other clockdomains, if + * they are not used + */ +static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) +{ + if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO) + clkdm_allow_idle(clkdm); + else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP && + atomic_read(&clkdm->usecount) == 0) + clkdm_sleep(clkdm); + return 0; +} + + static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) { struct power_state *pwrst; @@ -146,6 +163,8 @@ static int __init omap4_pm_init(void) goto err2; } + (void) clkdm_for_each(clkdms_setup, NULL); + #ifdef CONFIG_SUSPEND suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ From ba9456ac9c72a7a5d4d59340aba4259351832521 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 6 Jun 2011 17:56:49 +0530 Subject: [PATCH 07/24] ARM: OMAP: Add Secure HAL and monitor mode API infrastructure. On OMAP secure/emulation devices, certain APIs are exported by secure code. Add an infrastructure so that relevant operations on secure devices can be implemented using it. While at this, rename omap44xx-smc.S to omap-smc.S since the common APIs can be used on other OMAP's too. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/Makefile | 11 ++-- .../arm/mach-omap2/include/mach/omap-secure.h | 40 ++++++++++++++ arch/arm/mach-omap2/omap-secure.c | 52 +++++++++++++++++++ .../mach-omap2/{omap44xx-smc.S => omap-smc.S} | 23 ++++++++ 4 files changed, 121 insertions(+), 5 deletions(-) create mode 100644 arch/arm/mach-omap2/include/mach/omap-secure.h create mode 100644 arch/arm/mach-omap2/omap-secure.c rename arch/arm/mach-omap2/{omap44xx-smc.S => omap-smc.S} (70%) diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index b009f17dee56..bd3a224d1678 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -11,10 +11,11 @@ hwmod-common = omap_hwmod.o \ omap_hwmod_common_data.o clock-common = clock.o clock_common_data.o \ clkt_dpll.o clkt_clksel.o +secure-common = omap-smc.o omap-secure.o -obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) -obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) -obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) +obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common) +obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common) obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o @@ -24,11 +25,11 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o -obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o +obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) -AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec) +AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec) # Functions loaded to SRAM obj-$(CONFIG_SOC_OMAP2420) += sram242x.o diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h new file mode 100644 index 000000000000..26e7bcc49adc --- /dev/null +++ b/arch/arm/mach-omap2/include/mach/omap-secure.h @@ -0,0 +1,40 @@ +/* + * omap-secure.h: OMAP Secure infrastructure header. + * + * Copyright (C) 2011 Texas Instruments, Inc. 
+ * Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef OMAP_ARCH_OMAP_SECURE_H +#define OMAP_ARCH_OMAP_SECURE_H + +/* Monitor error code */ +#define API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR 0xFFFFFFFE +#define API_HAL_RET_VALUE_SERVICE_UNKNWON 0xFFFFFFFF + +/* HAL API error codes */ +#define API_HAL_RET_VALUE_OK 0x00 +#define API_HAL_RET_VALUE_FAIL 0x01 + +/* Secure HAL API flags */ +#define FLAG_START_CRITICAL 0x4 +#define FLAG_IRQFIQ_MASK 0x3 +#define FLAG_IRQ_ENABLE 0x2 +#define FLAG_FIQ_ENABLE 0x1 +#define NO_FLAG 0x0 + + +/* Secure low power HAL API index */ +#define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a +#define OMAP4_HAL_SAVEHW_INDEX 0x1b +#define OMAP4_HAL_SAVEALL_INDEX 0x1c +#define OMAP4_HAL_SAVEGIC_INDEX 0x1d + +extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, + u32 arg1, u32 arg2, u32 arg3, u32 arg4); +extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); + +#endif /* OMAP_ARCH_OMAP_SECURE_H */ diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c new file mode 100644 index 000000000000..e5a606e59b1e --- /dev/null +++ b/arch/arm/mach-omap2/omap-secure.c @@ -0,0 +1,52 @@ +/* + * OMAP Secure API infrastructure. + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * + * + * This program is free software,you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include + +#include + +/** + * omap_sec_dispatcher: Routine to dispatch low power secure + * service routines + * @idx: The HAL API index + * @flag: The flag indicating criticality of operation + * @nargs: Number of valid arguments out of four. + * @arg1, arg2, arg3 args4: Parameters passed to secure API + * + * Return the non-zero error value on failure. + */ +u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, + u32 arg3, u32 arg4) +{ + u32 ret; + u32 param[5]; + + param[0] = nargs; + param[1] = arg1; + param[2] = arg2; + param[3] = arg3; + param[4] = arg4; + + /* + * Secure API needs physical address + * pointer for the parameters + */ + flush_cache_all(); + outer_clean_range(__pa(param), __pa(param + 5)); + ret = omap_smc2(idx, flag, __pa(param)); + + return ret; +} diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap-smc.S similarity index 70% rename from arch/arm/mach-omap2/omap44xx-smc.S rename to arch/arm/mach-omap2/omap-smc.S index e69d37d95204..f6441c13cd8c 100644 --- a/arch/arm/mach-omap2/omap44xx-smc.S +++ b/arch/arm/mach-omap2/omap-smc.S @@ -31,6 +31,29 @@ ENTRY(omap_smc1) ldmfd sp!, {r2-r12, pc} ENDPROC(omap_smc1) +/** + * u32 omap_smc2(u32 id, u32 falg, u32 pargs) + * Low level common routine for secure HAL and PPA APIs. 
+ * @id: Application ID of HAL APIs + * @flag: Flag to indicate the criticality of operation + * @pargs: Physical address of parameter list starting + * with number of parametrs + */ +ENTRY(omap_smc2) + stmfd sp!, {r4-r12, lr} + mov r3, r2 + mov r2, r1 + mov r1, #0x0 @ Process ID + mov r6, #0xff + mov r12, #0x00 @ Secure Service ID + mov r7, #0 + mcr p15, 0, r7, c7, c5, 6 + dsb + dmb + smc #0 + ldmfd sp!, {r4-r12, pc} +ENDPROC(omap_smc2) + ENTRY(omap_modify_auxcoreboot0) stmfd sp!, {r1-r12, lr} ldr r12, =0x104 From 259ee57a8cda5760dd3e803c5271a6327e1f38ac Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 6 Jun 2011 20:28:23 +0530 Subject: [PATCH 08/24] ARM: OMAP: PM: Add support to allocate the memory for secure RAM Allocate the memory to save secure ram context which needs to be done when MPU is hitting OFF mode. The ROM code expects a physical address to this memory and hence use memblock APIs to reserve this memory as part of .reserve() callback. Maximum size as per secure RAM requirements is allocated. To keep omap1 build working, omap-secure.h file is created under plat-omap directory. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- .../arm/mach-omap2/include/mach/omap-secure.h | 3 ++ arch/arm/mach-omap2/omap-secure.c | 29 +++++++++++++++++++ arch/arm/plat-omap/common.c | 3 ++ arch/arm/plat-omap/include/plat/omap-secure.h | 13 +++++++++ 4 files changed, 48 insertions(+) create mode 100644 arch/arm/plat-omap/include/plat/omap-secure.h diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h index 26e7bcc49adc..29f60cae45e9 100644 --- a/arch/arm/mach-omap2/include/mach/omap-secure.h +++ b/arch/arm/mach-omap2/include/mach/omap-secure.h @@ -26,6 +26,8 @@ #define FLAG_FIQ_ENABLE 0x1 #define NO_FLAG 0x0 +/* Maximum Secure memory storage size */ +#define OMAP_SECURE_RAM_STORAGE (88 * SZ_1K) /* Secure low power HAL API index */ #define OMAP4_HAL_SAVESECURERAM_INDEX 0x1a @@ -36,5 +38,6 @@ extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4); extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); +extern phys_addr_t omap_secure_ram_mempool_base(void); #endif /* OMAP_ARCH_OMAP_SECURE_H */ diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index e5a606e59b1e..69f3c72d959b 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -13,11 +13,14 @@ #include #include #include +#include #include #include +static phys_addr_t omap_secure_memblock_base; + /** * omap_sec_dispatcher: Routine to dispatch low power secure * service routines @@ -50,3 +53,29 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, return ret; } + +/* Allocate the memory to save secure ram */ +int __init omap_secure_ram_reserve_memblock(void) +{ + phys_addr_t paddr; + u32 size = OMAP_SECURE_RAM_STORAGE; + + size = ALIGN(size, SZ_1M); + paddr = memblock_alloc(size, SZ_1M); + if (!paddr) { + pr_err("%s: failed to reserve %x bytes\n", + __func__, size); + return -ENOMEM; + } + memblock_free(paddr, size); + memblock_remove(paddr, size); + + omap_secure_memblock_base = paddr; + + return 0; +} + +phys_addr_t omap_secure_ram_mempool_base(void) +{ + return omap_secure_memblock_base; +} diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index 2ee6341fffdb..06383b51e655 100644 --- a/arch/arm/plat-omap/common.c +++ 
b/arch/arm/plat-omap/common.c @@ -22,6 +22,8 @@ #include #include +#include + #define NO_LENGTH_CHECK 0xffffffff @@ -66,6 +68,7 @@ void __init omap_reserve(void) omapfb_reserve_sdram_memblock(); omap_vram_reserve_sdram_memblock(); omap_dsp_reserve_sdram_memblock(); + omap_secure_ram_reserve_memblock(); } void __init omap_init_consistent_dma_size(void) diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h new file mode 100644 index 000000000000..64f9d1c7f1bb --- /dev/null +++ b/arch/arm/plat-omap/include/plat/omap-secure.h @@ -0,0 +1,13 @@ +#ifndef __OMAP_SECURE_H__ +#define __OMAP_SECURE_H__ + +#include + +#ifdef CONFIG_ARCH_OMAP2PLUS +extern int omap_secure_ram_reserve_memblock(void); +#else +static inline void omap_secure_ram_reserve_memblock(void) +{ } +#endif + +#endif /* __OMAP_SECURE_H__ */ From fcf6efa3ffbc3cc19e7abe39e0b90f497df2fc42 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 22:19:47 +0530 Subject: [PATCH 09/24] ARM: OMAP4: PM: Add WakeupGen module as OMAP gic_arch_extn OMAP WakeupGen is the interrupt controller extension used along with ARM GIC to wake the CPU out from low power states on external interrupts. The WakeupGen unit is responsible for generating the wakeup event from the incoming interrupts and enable bits. It is implemented in the MPU always ON power domain. During normal operation, WakeupGen delivers the external interrupts directly to the GIC. WakeupGen specification has one restriction as per Veyron version 1.6. It is SW responsibility to program interrupt enabling/disabling coherently in the GIC and in the WakeupGen enable registers. That is, a given interrupt for a given CPU is either enable at both GIC and WakeupGen, or disable at both, but no mix. That's the reason the WakeupGen is implemented as an extension of GIC. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/Makefile | 2 +- .../mach-omap2/include/mach/omap-wakeupgen.h | 39 +++ arch/arm/mach-omap2/omap-wakeupgen.c | 226 ++++++++++++++++++ arch/arm/mach-omap2/omap4-common.c | 3 + 4 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-omap2/include/mach/omap-wakeupgen.h create mode 100644 arch/arm/mach-omap2/omap-wakeupgen.c diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index bd3a224d1678..19c29d569d82 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o -obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o +obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h new file mode 100644 index 000000000000..d79321b0f2a2 --- /dev/null +++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h @@ -0,0 +1,39 @@ +/* + * OMAP WakeupGen header file + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef OMAP_ARCH_WAKEUPGEN_H +#define OMAP_ARCH_WAKEUPGEN_H + +#define OMAP_WKG_CONTROL_0 0x00 +#define OMAP_WKG_ENB_A_0 0x10 +#define OMAP_WKG_ENB_B_0 0x14 +#define OMAP_WKG_ENB_C_0 0x18 +#define OMAP_WKG_ENB_D_0 0x1c +#define OMAP_WKG_ENB_SECURE_A_0 0x20 +#define OMAP_WKG_ENB_SECURE_B_0 0x24 +#define OMAP_WKG_ENB_SECURE_C_0 0x28 +#define OMAP_WKG_ENB_SECURE_D_0 0x2c +#define OMAP_WKG_ENB_A_1 0x410 +#define OMAP_WKG_ENB_B_1 0x414 +#define OMAP_WKG_ENB_C_1 0x418 +#define OMAP_WKG_ENB_D_1 0x41c +#define OMAP_WKG_ENB_SECURE_A_1 0x420 +#define OMAP_WKG_ENB_SECURE_B_1 0x424 +#define OMAP_WKG_ENB_SECURE_C_1 0x428 +#define OMAP_WKG_ENB_SECURE_D_1 0x42c +#define OMAP_AUX_CORE_BOOT_0 0x800 +#define OMAP_AUX_CORE_BOOT_1 0x804 +#define OMAP_PTMSYNCREQ_MASK 0xc00 +#define OMAP_PTMSYNCREQ_EN 0xc04 +#define OMAP_TIMESTAMPCYCLELO 0xc08 +#define OMAP_TIMESTAMPCYCLEHI 0xc0c + +extern int __init omap_wakeupgen_init(void); +#endif diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c new file mode 100644 index 000000000000..a8a8d0efe350 --- /dev/null +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -0,0 +1,226 @@ +/* + * OMAP WakeupGen Source file + * + * OMAP WakeupGen is the interrupt controller extension used along + * with ARM GIC to wake the CPU out from low power states on + * external interrupts. It is responsible for generating wakeup + * event from the incoming interrupts and enable bits. It is + * implemented in MPU always ON power domain. During normal operation, + * WakeupGen delivers external interrupts directly to the GIC. + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define NR_REG_BANKS 4 +#define MAX_IRQS 128 +#define WKG_MASK_ALL 0x00000000 +#define WKG_UNMASK_ALL 0xffffffff +#define CPU_ENA_OFFSET 0x400 +#define CPU0_ID 0x0 +#define CPU1_ID 0x1 + +static void __iomem *wakeupgen_base; +static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks); +static DEFINE_SPINLOCK(wakeupgen_lock); +static unsigned int irq_target_cpu[NR_IRQS]; + +/* + * Static helper functions. + */ +static inline u32 wakeupgen_readl(u8 idx, u32 cpu) +{ + return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 + + (cpu * CPU_ENA_OFFSET) + (idx * 4)); +} + +static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu) +{ + __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 + + (cpu * CPU_ENA_OFFSET) + (idx * 4)); +} + +static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg) +{ + u8 i; + + for (i = 0; i < NR_REG_BANKS; i++) + wakeupgen_writel(reg, i, cpu); +} + +static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index) +{ + unsigned int spi_irq; + + /* + * PPIs and SGIs are not supported. + */ + if (irq < OMAP44XX_IRQ_GIC_START) + return -EINVAL; + + /* + * Subtract the GIC offset. + */ + spi_irq = irq - OMAP44XX_IRQ_GIC_START; + if (spi_irq > MAX_IRQS) { + pr_err("omap wakeupGen: Invalid IRQ%d\n", irq); + return -EINVAL; + } + + /* + * Each WakeupGen register controls 32 interrupt. + * i.e. 
1 bit per SPI IRQ + */ + *reg_index = spi_irq >> 5; + *bit_posn = spi_irq %= 32; + + return 0; +} + +static void _wakeupgen_clear(unsigned int irq, unsigned int cpu) +{ + u32 val, bit_number; + u8 i; + + if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) + return; + + val = wakeupgen_readl(i, cpu); + val &= ~BIT(bit_number); + wakeupgen_writel(val, i, cpu); +} + +static void _wakeupgen_set(unsigned int irq, unsigned int cpu) +{ + u32 val, bit_number; + u8 i; + + if (_wakeupgen_get_irq_info(irq, &bit_number, &i)) + return; + + val = wakeupgen_readl(i, cpu); + val |= BIT(bit_number); + wakeupgen_writel(val, i, cpu); +} + +static void _wakeupgen_save_masks(unsigned int cpu) +{ + u8 i; + + for (i = 0; i < NR_REG_BANKS; i++) + per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu); +} + +static void _wakeupgen_restore_masks(unsigned int cpu) +{ + u8 i; + + for (i = 0; i < NR_REG_BANKS; i++) + wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu); +} + +/* + * Architecture specific Mask extension + */ +static void wakeupgen_mask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&wakeupgen_lock, flags); + _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]); + spin_unlock_irqrestore(&wakeupgen_lock, flags); +} + +/* + * Architecture specific Unmask extension + */ +static void wakeupgen_unmask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&wakeupgen_lock, flags); + _wakeupgen_set(d->irq, irq_target_cpu[d->irq]); + spin_unlock_irqrestore(&wakeupgen_lock, flags); +} + +/* + * Mask or unmask all interrupts on given CPU. + * 0 = Mask all interrupts on the 'cpu' + * 1 = Unmask all interrupts on the 'cpu' + * Ensure that the initial mask is maintained. This is faster than + * iterating through GIC registers to arrive at the correct masks. + */ +static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) +{ + unsigned long flags; + + spin_lock_irqsave(&wakeupgen_lock, flags); + if (set) { + _wakeupgen_save_masks(cpu); + _wakeupgen_set_all(cpu, WKG_MASK_ALL); + } else { + _wakeupgen_set_all(cpu, WKG_UNMASK_ALL); + _wakeupgen_restore_masks(cpu); + } + spin_unlock_irqrestore(&wakeupgen_lock, flags); +} + +/* + * Initialise the wakeupgen module. + */ +int __init omap_wakeupgen_init(void) +{ + int i; + unsigned int boot_cpu = smp_processor_id(); + + /* Not supported on OMAP4 ES1.0 silicon */ + if (omap_rev() == OMAP4430_REV_ES1_0) { + WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n"); + return -EPERM; + } + + /* Static mapping, never released */ + wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K); + if (WARN_ON(!wakeupgen_base)) + return -ENOMEM; + + /* Clear all IRQ bitmasks at wakeupGen level */ + for (i = 0; i < NR_REG_BANKS; i++) { + wakeupgen_writel(0, i, CPU0_ID); + wakeupgen_writel(0, i, CPU1_ID); + } + + /* + * Override GIC architecture specific functions to add + * OMAP WakeupGen interrupt controller along with GIC + */ + gic_arch_extn.irq_mask = wakeupgen_mask; + gic_arch_extn.irq_unmask = wakeupgen_unmask; + gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; + + /* + * FIXME: Add support to set_smp_affinity() once the core + * GIC code has necessary hooks in place. + */ + + /* Associate all the IRQs to boot CPU like GIC init does. 
*/ + for (i = 0; i < NR_IRQS; i++) + irq_target_cpu[i] = boot_cpu; + + return 0; +} diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 2489f5b8b983..1b93d31fe8e9 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -22,6 +22,7 @@ #include #include +#include #include "common.h" #include "omap4-sar-layout.h" @@ -45,6 +46,8 @@ void __init gic_init_irq(void) omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512); BUG_ON(!omap_irq_base); + omap_wakeupgen_init(); + gic_init(0, 29, gic_dist_base_addr, omap_irq_base); } From b2b9762f76981c16a8768255284efeae7f27e4f1 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 22:19:48 +0530 Subject: [PATCH 10/24] ARM: OMAP4: PM: Add CPUX OFF mode support This patch adds the CPU0 and CPU1 off mode support. CPUX close switch retention (CSWR) is not supported by hardware design. The CPUx OFF mode isn't supported on OMAP4430 ES1.0 CPUx sleep code is common for hotplug, suspend and CPUilde. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/Makefile | 6 +- arch/arm/mach-omap2/common.h | 30 ++ .../arm/mach-omap2/include/mach/omap-secure.h | 9 + arch/arm/mach-omap2/omap-mpuss-lowpower.c | 248 ++++++++++++++++ arch/arm/mach-omap2/omap-smp.c | 13 + arch/arm/mach-omap2/omap4-sar-layout.h | 9 + arch/arm/mach-omap2/pm44xx.c | 6 + arch/arm/mach-omap2/sleep44xx.S | 276 ++++++++++++++++++ 8 files changed, 595 insertions(+), 2 deletions(-) create mode 100644 arch/arm/mach-omap2/omap-mpuss-lowpower.c create mode 100644 arch/arm/mach-omap2/sleep44xx.S diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 19c29d569d82..58de1f6df27c 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -25,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o -obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o +obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \ + sleep44xx.o plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec) +AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec) # Functions loaded to SRAM obj-$(CONFIG_SOC_OMAP2420) += sram242x.o @@ -63,7 +65,7 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ cpuidle34xx.o -obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o +obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o obj-$(CONFIG_PM_DEBUG) += pm-debug.o obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 7ebcb6a9b73e..36cdba7727f2 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -24,9 +24,11 @@ #ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H #define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H +#ifndef __ASSEMBLER__ #include #include +#include #ifdef CONFIG_SOC_OMAP2420 extern void omap242x_map_common_io(void); @@ -183,6 +185,7 @@ static inline void __iomem *omap4_get_scu_base(void) extern void __init gic_init_irq(void); extern void omap_smc1(u32 fn, u32 arg); extern void __iomem *omap4_get_sar_ram_base(void); +extern void omap_do_wfi(void); 
#ifdef CONFIG_SMP /* Needed for secondary core boot */ @@ -192,4 +195,31 @@ extern void omap_auxcoreboot_addr(u32 cpu_addr); extern u32 omap_read_auxcoreboot0(void); #endif +#if defined(CONFIG_SMP) && defined(CONFIG_PM) +extern int omap4_mpuss_init(void); +extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); +extern int omap4_finish_suspend(unsigned long cpu_state); +extern void omap4_cpu_resume(void); +#else +static inline int omap4_enter_lowpower(unsigned int cpu, + unsigned int power_state) +{ + cpu_do_idle(); + return 0; +} + +static inline int omap4_mpuss_init(void) +{ + return 0; +} + +static inline int omap4_finish_suspend(unsigned long cpu_state) +{ + return 0; +} + +static inline void omap4_cpu_resume(void) +{} +#endif +#endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h index 29f60cae45e9..5f0763dd5664 100644 --- a/arch/arm/mach-omap2/include/mach/omap-secure.h +++ b/arch/arm/mach-omap2/include/mach/omap-secure.h @@ -35,9 +35,18 @@ #define OMAP4_HAL_SAVEALL_INDEX 0x1c #define OMAP4_HAL_SAVEGIC_INDEX 0x1d +/* Secure Monitor mode APIs */ +#define OMAP4_MON_SCU_PWR_INDEX 0x108 + +/* Secure PPA(Primary Protected Application) APIs */ +#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25 + +#ifndef __ASSEMBLER__ + extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2, u32 arg3, u32 arg4); extern u32 omap_smc2(u32 id, u32 falg, u32 pargs); extern phys_addr_t omap_secure_ram_mempool_base(void); +#endif /* __ASSEMBLER__ */ #endif /* OMAP_ARCH_OMAP_SECURE_H */ diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c new file mode 100644 index 000000000000..867fee51e42c --- /dev/null +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -0,0 +1,248 @@ +/* + * OMAP MPUSS low power code + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * + * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU + * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller, + * CPU0 and CPU1 LPRM modules. + * CPU0, CPU1 and MPUSS each have there own power domain and + * hence multiple low power combinations of MPUSS are possible. + * + * The CPU0 and CPU1 can't support Closed switch Retention (CSWR) + * because the mode is not supported by hw constraints of dormant + * mode. While waking up from the dormant mode, a reset signal + * to the Cortex-A9 processor must be asserted by the external + * power controller. + * + * With architectural inputs and hardware recommendations, only + * below modes are supported from power gain vs latency point of view. + * + * CPU0 CPU1 MPUSS + * ---------------------------------------------- + * ON ON ON + * ON(Inactive) OFF ON(Inactive) + * OFF OFF CSWR + * OFF OFF OSWR (*TBD) + * OFF OFF OFF* (*TBD) + * ---------------------------------------------- + * + * Note: CPU0 is the master core and it is the last CPU to go down + * and first to wake-up when MPUSS low power states are excercised + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "omap4-sar-layout.h" +#include "pm.h" +#include "powerdomain.h" + +#ifdef CONFIG_SMP + +struct omap4_cpu_pm_info { + struct powerdomain *pwrdm; + void __iomem *scu_sar_addr; + void __iomem *wkup_sar_addr; +}; + +static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); + +/* + * Program the wakeup routine address for the CPU0 and CPU1 + * used for OFF or DORMANT wakeup. + */ +static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + + __raw_writel(addr, pm_info->wkup_sar_addr); +} + +/* + * Set the CPUx powerdomain's previous power state + */ +static inline void set_cpu_next_pwrst(unsigned int cpu_id, + unsigned int power_state) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + + pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); +} + +/* + * Read CPU's previous power state + */ +static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + + return pwrdm_read_prev_pwrst(pm_info->pwrdm); +} + +/* + * Clear the CPUx powerdomain's previous power state + */ +static inline void clear_cpu_prev_pwrst(unsigned int cpu_id) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); +} + +/* + * Store the SCU power status value to scratchpad memory + */ +static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + u32 scu_pwr_st; + + switch (cpu_state) { + case PWRDM_POWER_RET: + scu_pwr_st = SCU_PM_DORMANT; + break; + case PWRDM_POWER_OFF: + scu_pwr_st = SCU_PM_POWEROFF; + break; + case PWRDM_POWER_ON: + case PWRDM_POWER_INACTIVE: + default: + scu_pwr_st = SCU_PM_NORMAL; + break; + } + + __raw_writel(scu_pwr_st, pm_info->scu_sar_addr); +} + +/** + * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function + * The purpose of this function is to manage low power programming + * of OMAP4 MPUSS subsystem + * @cpu : CPU ID + * @power_state: Low power state. + */ +int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) +{ + unsigned int save_state = 0; + unsigned int wakeup_cpu; + + if (omap_rev() == OMAP4430_REV_ES1_0) + return -ENXIO; + + switch (power_state) { + case PWRDM_POWER_ON: + case PWRDM_POWER_INACTIVE: + save_state = 0; + break; + case PWRDM_POWER_OFF: + save_state = 1; + break; + case PWRDM_POWER_RET: + default: + /* + * CPUx CSWR is invalid hardware state. Also CPUx OSWR + * doesn't make much scense, since logic is lost and $L1 + * needs to be cleaned because of coherency. This makes + * CPUx OSWR equivalent to CPUX OFF and hence not supported + */ + WARN_ON(1); + return -ENXIO; + } + + clear_cpu_prev_pwrst(cpu); + set_cpu_next_pwrst(cpu, power_state); + set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); + scu_pwrst_prepare(cpu, power_state); + + /* + * Call low level function with targeted low power state. + */ + cpu_suspend(save_state, omap4_finish_suspend); + + /* + * Restore the CPUx power state to ON otherwise CPUx + * power domain can transitions to programmed low power + * state while doing WFI outside the low powe code. 
On + * secure devices, CPUx does WFI which can result in + * domain transition + */ + wakeup_cpu = smp_processor_id(); + set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON); + + return 0; +} + +/* + * Initialise OMAP4 MPUSS + */ +int __init omap4_mpuss_init(void) +{ + struct omap4_cpu_pm_info *pm_info; + void __iomem *sar_base = omap4_get_sar_ram_base(); + + if (omap_rev() == OMAP4430_REV_ES1_0) { + WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); + return -ENODEV; + } + + /* Initilaise per CPU PM information */ + pm_info = &per_cpu(omap4_pm_info, 0x0); + pm_info->scu_sar_addr = sar_base + SCU_OFFSET0; + pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET; + pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm"); + if (!pm_info->pwrdm) { + pr_err("Lookup failed for CPU0 pwrdm\n"); + return -ENODEV; + } + + /* Clear CPU previous power domain state */ + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); + + /* Initialise CPU0 power domain state to ON */ + pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); + + pm_info = &per_cpu(omap4_pm_info, 0x1); + pm_info->scu_sar_addr = sar_base + SCU_OFFSET1; + pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; + pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm"); + if (!pm_info->pwrdm) { + pr_err("Lookup failed for CPU1 pwrdm\n"); + return -ENODEV; + } + + /* Clear CPU previous power domain state */ + pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); + + /* Initialise CPU1 power domain state to ON */ + pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); + + /* Save device type on scratchpad for low level code to use */ + if (omap_type() != OMAP2_DEVICE_TYPE_GP) + __raw_writel(1, sar_base + OMAP_TYPE_OFFSET); + else + __raw_writel(0, sar_base + OMAP_TYPE_OFFSET); + + return 0; +} + +#endif diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 74e90b40a0c7..ee83808de0ff 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "common.h" @@ -39,6 +40,18 @@ void __iomem *omap4_get_scu_base(void) void __cpuinit platform_secondary_init(unsigned int cpu) { + /* + * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. + * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA + * init and for CPU1, a secure PPA API provided. CPU0 must be ON + * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+. + * OMAP443X GP devices- SMP bit isn't accessible. + * OMAP446X GP devices - SMP bit access is enabled on both CPUs. + */ + if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP)) + omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX, + 4, 0, 0, 0, 0, 0); + /* * If any interrupts are already enabled for the primary * core (e.g. 
timer irq), then they will not have been enabled diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h index 7781ea4dacbc..970a2eef3ab9 100644 --- a/arch/arm/mach-omap2/omap4-sar-layout.h +++ b/arch/arm/mach-omap2/omap4-sar-layout.h @@ -19,4 +19,13 @@ #define SAR_BANK3_OFFSET 0x2000 #define SAR_BANK4_OFFSET 0x3000 +/* Scratch pad memory offsets from SAR_BANK1 */ +#define SCU_OFFSET0 0xd00 +#define SCU_OFFSET1 0xd04 +#define OMAP_TYPE_OFFSET 0xd10 + +/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */ +#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04 +#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08 + #endif diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index c34139dc8d8c..781aadf98e32 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -163,6 +163,12 @@ static int __init omap4_pm_init(void) goto err2; } + ret = omap4_mpuss_init(); + if (ret) { + pr_err("Failed to initialise OMAP4 MPUSS\n"); + goto err2; + } + (void) clkdm_for_each(clkdms_setup, NULL); #ifdef CONFIG_SUSPEND diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S new file mode 100644 index 000000000000..e5521945ba8e --- /dev/null +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -0,0 +1,276 @@ +/* + * OMAP44xx sleep code. + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * + * This program is free software,you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "common.h" +#include "omap4-sar-layout.h" + +#if defined(CONFIG_SMP) && defined(CONFIG_PM) + +.macro DO_SMC + dsb + smc #0 + dsb +.endm + +ppa_zero_params: + .word 0x0 + +/* + * ============================= + * == CPU suspend finisher == + * ============================= + * + * void omap4_finish_suspend(unsigned long cpu_state) + * + * This function code saves the CPU context and performs the CPU + * power down sequence. Calling WFI effectively changes the CPU + * power domains states to the desired target power state. + * + * @cpu_state : contains context save state (r0) + * 0 - No context lost + * 1 - CPUx L1 and logic lost: MPUSS CSWR + * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR + * 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF + * @return: This function never returns for CPU OFF and DORMANT power states. + * Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up + * from this follows a full CPU reset path via ROM code to CPU restore code. + * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. + * It returns to the caller for CPU INACTIVE and ON power states or in case + * CPU failed to transition to targeted OFF/DORMANT state. + */ +ENTRY(omap4_finish_suspend) + stmfd sp!, {lr} + cmp r0, #0x0 + beq do_WFI @ No lowpower state, jump to WFI + + /* + * Flush all data from the L1 data cache before disabling + * SCTLR.C bit. + */ + bl omap4_get_sar_ram_base + ldr r9, [r0, #OMAP_TYPE_OFFSET] + cmp r9, #0x1 @ Check for HS device + bne skip_secure_l1_clean + mov r0, #SCU_PM_NORMAL + mov r1, #0xFF @ clean seucre L1 + stmfd r13!, {r4-r12, r14} + ldr r12, =OMAP4_MON_SCU_PWR_INDEX + DO_SMC + ldmfd r13!, {r4-r12, r14} +skip_secure_l1_clean: + bl v7_flush_dcache_all + + /* + * Clear the SCTLR.C bit to prevent further data cache + * allocation. 
Clearing SCTLR.C would make all the data accesses + * strongly ordered and would not hit the cache. + */ + mrc p15, 0, r0, c1, c0, 0 + bic r0, r0, #(1 << 2) @ Disable the C bit + mcr p15, 0, r0, c1, c0, 0 + isb + + /* + * Invalidate L1 data cache. Even though only invalidate is + * necessary exported flush API is used here. Doing clean + * on already clean cache would be almost NOP. + */ + bl v7_flush_dcache_all + + /* + * Switch the CPU from Symmetric Multiprocessing (SMP) mode + * to AsymmetricMultiprocessing (AMP) mode by programming + * the SCU power status to DORMANT or OFF mode. + * This enables the CPU to be taken out of coherency by + * preventing the CPU from receiving cache, TLB, or BTB + * maintenance operations broadcast by other CPUs in the cluster. + */ + bl omap4_get_sar_ram_base + mov r8, r0 + ldr r9, [r8, #OMAP_TYPE_OFFSET] + cmp r9, #0x1 @ Check for HS device + bne scu_gp_set + mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR + ands r0, r0, #0x0f + ldreq r0, [r8, #SCU_OFFSET0] + ldrne r0, [r8, #SCU_OFFSET1] + mov r1, #0x00 + stmfd r13!, {r4-r12, r14} + ldr r12, =OMAP4_MON_SCU_PWR_INDEX + DO_SMC + ldmfd r13!, {r4-r12, r14} + b skip_scu_gp_set +scu_gp_set: + mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR + ands r0, r0, #0x0f + ldreq r1, [r8, #SCU_OFFSET0] + ldrne r1, [r8, #SCU_OFFSET1] + bl omap4_get_scu_base + bl scu_power_mode +skip_scu_gp_set: + mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data + tst r0, #(1 << 18) + mrcne p15, 0, r0, c1, c0, 1 + bicne r0, r0, #(1 << 6) @ Disable SMP bit + mcrne p15, 0, r0, c1, c0, 1 + isb + dsb + +do_WFI: + bl omap_do_wfi + + /* + * CPU is here when it failed to enter OFF/DORMANT or + * no low power state was attempted. + */ + mrc p15, 0, r0, c1, c0, 0 + tst r0, #(1 << 2) @ Check C bit enabled? + orreq r0, r0, #(1 << 2) @ Enable the C bit + mcreq p15, 0, r0, c1, c0, 0 + isb + + /* + * Ensure the CPU power state is set to NORMAL in + * SCU power state so that CPU is back in coherency. + * In non-coherent mode CPU can lock-up and lead to + * system deadlock. + */ + mrc p15, 0, r0, c1, c0, 1 + tst r0, #(1 << 6) @ Check SMP bit enabled? + orreq r0, r0, #(1 << 6) + mcreq p15, 0, r0, c1, c0, 1 + isb + bl omap4_get_sar_ram_base + mov r8, r0 + ldr r9, [r8, #OMAP_TYPE_OFFSET] + cmp r9, #0x1 @ Check for HS device + bne scu_gp_clear + mov r0, #SCU_PM_NORMAL + mov r1, #0x00 + stmfd r13!, {r4-r12, r14} + ldr r12, =OMAP4_MON_SCU_PWR_INDEX + DO_SMC + ldmfd r13!, {r4-r12, r14} + b skip_scu_gp_clear +scu_gp_clear: + bl omap4_get_scu_base + mov r1, #SCU_PM_NORMAL + bl scu_power_mode +skip_scu_gp_clear: + isb + dsb + ldmfd sp!, {pc} +ENDPROC(omap4_finish_suspend) + +/* + * ============================ + * == CPU resume entry point == + * ============================ + * + * void omap4_cpu_resume(void) + * + * ROM code jumps to this function while waking up from CPU + * OFF or DORMANT state. Physical address of the function is + * stored in the SAR RAM while entering to OFF or DORMANT mode. + * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. + */ +ENTRY(omap4_cpu_resume) + /* + * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. + * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA + * init and for CPU1, a secure PPA API provided. CPU0 must be ON + * while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+. + * OMAP443X GP devices- SMP bit isn't accessible. + * OMAP446X GP devices - SMP bit access is enabled on both CPUs. 
+ */ + ldr r8, =OMAP44XX_SAR_RAM_BASE + ldr r9, [r8, #OMAP_TYPE_OFFSET] + cmp r9, #0x1 @ Skip if GP device + bne skip_ns_smp_enable + mrc p15, 0, r0, c0, c0, 5 + ands r0, r0, #0x0f + beq skip_ns_smp_enable +ppa_actrl_retry: + mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX + adr r3, ppa_zero_params @ Pointer to parameters + mov r1, #0x0 @ Process ID + mov r2, #0x4 @ Flag + mov r6, #0xff + mov r12, #0x00 @ Secure Service ID + DO_SMC + cmp r0, #0x0 @ API returns 0 on success. + beq enable_smp_bit + b ppa_actrl_retry +enable_smp_bit: + mrc p15, 0, r0, c1, c0, 1 + tst r0, #(1 << 6) @ Check SMP bit enabled? + orreq r0, r0, #(1 << 6) + mcreq p15, 0, r0, c1, c0, 1 + isb +skip_ns_smp_enable: + + b cpu_resume @ Jump to generic resume +ENDPROC(omap4_cpu_resume) +#endif + +ENTRY(omap_do_wfi) + stmfd sp!, {lr} + + /* + * Execute an ISB instruction to ensure that all of the + * CP15 register changes have been committed. + */ + isb + + /* + * Execute a barrier instruction to ensure that all cache, + * TLB and branch predictor maintenance operations issued + * by any CPU in the cluster have completed. + */ + dsb + dmb + + /* + * Execute a WFI instruction and wait until the + * STANDBYWFI output is asserted to indicate that the + * CPU is in idle and low power state. CPU can specualatively + * prefetch the instructions so add NOPs after WFI. Sixteen + * NOPs as per Cortex-A9 pipeline. + */ + wfi @ Wait For Interrupt + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + + ldmfd sp!, {pc} +ENDPROC(omap_do_wfi) From a6e48358d15fec2f3f9e86a6d6fc62422141a3a9 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 4 Sep 2011 13:10:32 +0530 Subject: [PATCH 11/24] ARM: OMAP4: Remove __INIT from omap_secondary_startup() to re-use it for hotplug. Remove the __INIT from omap_secondary_startup() so that it can be re-used for CPU hotplug. While at this, remove the un-used AUXBOOT register reference. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/omap-headsmp.S | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S index 4ee6aeca885a..b13ef7ef5ef4 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S @@ -18,11 +18,6 @@ #include #include -/* Physical address needed since MMU not enabled yet on secondary core */ -#define OMAP4_AUX_CORE_BOOT1_PA 0x48281804 - - __INIT - /* * OMAP4 specific entry point for secondary CPU to jump from ROM * code. This routine also provides a holding flag into which From b5b4f2881f619460fdb165111bac10a3dd8eebee Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 22:19:48 +0530 Subject: [PATCH 12/24] ARM: OMAP4: PM: Program CPU1 to hit OFF when off-lined Program non-boot CPUs to hit lowest supported power state when it is off-lined using cpu hotplug framework. 
Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/common.h | 7 +++++ arch/arm/mach-omap2/omap-hotplug.c | 14 ++++++---- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 32 +++++++++++++++++++++++ arch/arm/mach-omap2/omap-wakeupgen.c | 32 +++++++++++++++++++++++ 4 files changed, 80 insertions(+), 5 deletions(-) diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 36cdba7727f2..c078db1b3de8 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -200,6 +200,7 @@ extern int omap4_mpuss_init(void); extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); extern int omap4_finish_suspend(unsigned long cpu_state); extern void omap4_cpu_resume(void); +extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); #else static inline int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) @@ -208,6 +209,12 @@ static inline int omap4_enter_lowpower(unsigned int cpu, return 0; } +static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) +{ + cpu_do_idle(); + return 0; +} + static inline int omap4_mpuss_init(void) { return 0; diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c index e5a1c3f40a86..adbe4d8c7caf 100644 --- a/arch/arm/mach-omap2/omap-hotplug.c +++ b/arch/arm/mach-omap2/omap-hotplug.c @@ -22,6 +22,8 @@ #include "common.h" +#include "powerdomain.h" + int platform_cpu_kill(unsigned int cpu) { return 1; @@ -33,6 +35,8 @@ int platform_cpu_kill(unsigned int cpu) */ void platform_cpu_die(unsigned int cpu) { + unsigned int this_cpu; + flush_cache_all(); dsb(); @@ -40,15 +44,15 @@ void platform_cpu_die(unsigned int cpu) * we're ready for shutdown now, so do it */ if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) - printk(KERN_CRIT "Secure clear status failed\n"); + pr_err("Secure clear status failed\n"); for (;;) { /* - * Execute WFI + * Enter into low power state */ - do_wfi(); - - if (omap_read_auxcoreboot0() == cpu) { + omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); + this_cpu = smp_processor_id(); + if (omap_read_auxcoreboot0() == this_cpu) { /* * OK, proper wakeup, we're done */ diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 867fee51e42c..9c1c12b8c5e1 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -192,6 +192,38 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) return 0; } +/** + * omap4_hotplug_cpu: OMAP4 CPU hotplug entry + * @cpu : CPU ID + * @power_state: CPU low power state. + */ +int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) +{ + unsigned int cpu_state = 0; + + if (omap_rev() == OMAP4430_REV_ES1_0) + return -ENXIO; + + if (power_state == PWRDM_POWER_OFF) + cpu_state = 1; + + clear_cpu_prev_pwrst(cpu); + set_cpu_next_pwrst(cpu, power_state); + set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup)); + scu_pwrst_prepare(cpu, power_state); + + /* + * CPU never retuns back if targetted power state is OFF mode. + * CPU ONLINE follows normal CPU ONLINE ptah via + * omap_secondary_startup(). 
+ */ + omap4_finish_suspend(cpu_state); + + set_cpu_next_pwrst(cpu, PWRDM_POWER_ON); + return 0; +} + + /* * Initialise OMAP4 MPUSS */ diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index a8a8d0efe350..701dfecad64b 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -180,6 +180,36 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) spin_unlock_irqrestore(&wakeupgen_lock, flags); } +#ifdef CONFIG_HOTPLUG_CPU +static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)hcpu; + + switch (action) { + case CPU_ONLINE: + wakeupgen_irqmask_all(cpu, 0); + break; + case CPU_DEAD: + wakeupgen_irqmask_all(cpu, 1); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __refdata irq_hotplug_notifier = { + .notifier_call = irq_cpu_hotplug_notify, +}; + +static void __init irq_hotplug_init(void) +{ + register_hotcpu_notifier(&irq_hotplug_notifier); +} +#else +static void __init irq_hotplug_init(void) +{} +#endif + /* * Initialise the wakeupgen module. */ @@ -222,5 +252,7 @@ int __init omap_wakeupgen_init(void) for (i = 0; i < NR_IRQS; i++) irq_target_cpu[i] = boot_cpu; + irq_hotplug_init(); + return 0; } From e97ca477e993da87769f967bd6f2602a7eab9715 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 22:19:49 +0530 Subject: [PATCH 13/24] ARM: OMAP4: PM: CPU1 wakeup workaround from Low power modes The SGI(Software Generated Interrupts) are not wakeup capable from low power states. This is known limitation on OMAP4 and needs to be worked around by using software forced clockdomain wake-up. CPU0 forces the CPU1 clockdomain to software force wakeup. More details can be found in OMAP4430 TRM - Version J Section : 4.3.4.2 Power States of CPU0 and CPU1 Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/omap-smp.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ee83808de0ff..c1bf3ef0ba02 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -28,6 +28,8 @@ #include "common.h" +#include "clockdomain.h" + /* SCU base address */ static void __iomem *scu_base; @@ -68,6 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu) int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) { + static struct clockdomain *cpu1_clkdm; + static bool booted; /* * Set synchronisation state between this boot processor * and the secondary one @@ -83,6 +87,29 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) omap_modify_auxcoreboot0(0x200, 0xfffffdff); flush_cache_all(); smp_wmb(); + + if (!cpu1_clkdm) + cpu1_clkdm = clkdm_lookup("mpu1_clkdm"); + + /* + * The SGI(Software Generated Interrupts) are not wakeup capable + * from low power states. This is known limitation on OMAP4 and + * needs to be worked around by using software forced clockdomain + * wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to + * software force wakeup. The clockdomain is then put back to + * hardware supervised mode. 
+ * More details can be found in OMAP4430 TRM - Version J + * Section : + * 4.3.4.2 Power States of CPU0 and CPU1 + */ + if (booted) { + clkdm_wakeup(cpu1_clkdm); + clkdm_allow_idle(cpu1_clkdm); + } else { + dsb_sev(); + booted = true; + } + gic_raise_softirq(cpumask_of(cpu), 1); /* From 72826b9f8892957156e3d390b74d8bd5e0835d51 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 18 Jul 2011 12:25:10 +0530 Subject: [PATCH 14/24] ARM: OMAP4: PM: Use custom omap_do_wfi() for default idle. Default arch_idle() isn't good enough for OMAP4 because of aync bridge errata and necessity of NOPs post WFI to avoid speculative prefetch aborts. Hence Use OMAP4 custom omap_do_wfi() hook for default idle. Later in the series, async bridge errata work-around patch updates the omap_do_wfi() with necessary interconnects barriers. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/pm44xx.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 781aadf98e32..72c745047514 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -107,6 +107,24 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state); } +/** + * omap_default_idle - OMAP4 default ilde routine.' + * + * Implements OMAP4 memory, IO ordering requirements which can't be addressed + * with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and + * by secondary CPU with CONFIG_CPUIDLE. + */ +static void omap_default_idle(void) +{ + local_irq_disable(); + local_fiq_disable(); + + omap_do_wfi(); + + local_fiq_enable(); + local_irq_enable(); +} + /** * omap4_pm_init - Init routine for OMAP4 PM * @@ -175,6 +193,9 @@ static int __init omap4_pm_init(void) suspend_set_ops(&omap_pm_ops); #endif /* CONFIG_SUSPEND */ + /* Overwrite the default arch_idle() */ + pm_idle = omap_default_idle; + err2: return ret; } From e44f9a7744de8e39eda0f544171efc6e4b1ed91c Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 22:19:49 +0530 Subject: [PATCH 15/24] ARM: OMAP4: suspend: Add MPUSS power domain RETENTION support This patch adds MPUSS(MPU Sub System) power domain CSWR(Close Switch Retention) support to system wide suspend. For MPUSS power domain to hit retention(CSWR or OSWR), both CPU0 and CPU1 power domains need to be in OFF or DORMANT state, since CPU power domain CSWR is not supported by hardware Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 16 ++++++ arch/arm/mach-omap2/pm44xx.c | 66 +++++++++++++++++++++-- 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 9c1c12b8c5e1..f9bb2b3d977b 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -66,6 +66,7 @@ struct omap4_cpu_pm_info { }; static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); +static struct powerdomain *mpuss_pd; /* * Program the wakeup routine address for the CPU0 and CPU1 @@ -140,6 +141,13 @@ static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) * of OMAP4 MPUSS subsystem * @cpu : CPU ID * @power_state: Low power state. 
+ * + * MPUSS states for the context save: + * save_state = + * 0 - Nothing lost and no need to save: MPUSS INACTIVE + * 1 - CPUx L1 and logic lost: MPUSS CSWR + * 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR + * 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF */ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) { @@ -169,6 +177,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) return -ENXIO; } + pwrdm_clear_all_prev_pwrst(mpuss_pd); clear_cpu_prev_pwrst(cpu); set_cpu_next_pwrst(cpu, power_state); set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); @@ -268,6 +277,13 @@ int __init omap4_mpuss_init(void) /* Initialise CPU1 power domain state to ON */ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); + mpuss_pd = pwrdm_lookup("mpu_pwrdm"); + if (!mpuss_pd) { + pr_err("Failed to lookup MPUSS power domain\n"); + return -ENODEV; + } + pwrdm_clear_all_prev_pwrst(mpuss_pd); + /* Save device type on scratchpad for low level code to use */ if (omap_type() != OMAP2_DEVICE_TYPE_GP) __raw_writel(1, sar_base + OMAP_TYPE_OFFSET); diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 72c745047514..6dc9bbe0a4a8 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -1,8 +1,9 @@ /* * OMAP4 Power Management Routines * - * Copyright (C) 2010 Texas Instruments, Inc. + * Copyright (C) 2010-2011 Texas Instruments, Inc. * Rajendra Nayak + * Santosh Shilimkar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -19,6 +20,7 @@ #include "common.h" #include "clockdomain.h" #include "powerdomain.h" +#include "pm.h" struct power_state { struct powerdomain *pwrdm; @@ -34,7 +36,47 @@ static LIST_HEAD(pwrst_list); #ifdef CONFIG_SUSPEND static int omap4_pm_suspend(void) { - do_wfi(); + struct power_state *pwrst; + int state, ret = 0; + u32 cpu_id = smp_processor_id(); + + /* Save current powerdomain state */ + list_for_each_entry(pwrst, &pwrst_list, node) { + pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); + } + + /* Set targeted power domain states by suspend */ + list_for_each_entry(pwrst, &pwrst_list, node) { + omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); + } + + /* + * For MPUSS to hit power domain retention(CSWR or OSWR), + * CPU0 and CPU1 power domains need to be in OFF or DORMANT state, + * since CPU power domain CSWR is not supported by hardware + * Only master CPU follows suspend path. All other CPUs follow + * CPU hotplug path in system wide suspend. On OMAP4, CPU power + * domain CSWR is not supported by hardware. + * More details can be found in OMAP4430 TRM section 4.3.4.2. + */ + omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF); + + /* Restore next powerdomain state */ + list_for_each_entry(pwrst, &pwrst_list, node) { + state = pwrdm_read_prev_pwrst(pwrst->pwrdm); + if (state > pwrst->next_state) { + pr_info("Powerdomain (%s) didn't enter " + "target state %d\n", + pwrst->pwrdm->name, pwrst->next_state); + ret = -1; + } + omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); + } + if (ret) + pr_crit("Could not enter target state in pm_suspend\n"); + else + pr_info("Successfully put all powerdomains to target state\n"); + return 0; } @@ -97,14 +139,30 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused) if (!pwrdm->pwrsts) return 0; + /* + * Skip CPU0 and CPU1 power domains. 
CPU1 is programmed + * through hotplug path and CPU0 explicitly programmed + * further down in the code path + */ + if (!strncmp(pwrdm->name, "cpu", 3)) + return 0; + + /* + * FIXME: Remove this check when core retention is supported + * Only MPUSS power domain is added in the list. + */ + if (strcmp(pwrdm->name, "mpu_pwrdm")) + return 0; + pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC); if (!pwrst) return -ENOMEM; + pwrst->pwrdm = pwrdm; - pwrst->next_state = PWRDM_POWER_ON; + pwrst->next_state = PWRDM_POWER_RET; list_add(&pwrst->node, &pwrst_list); - return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state); + return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); } /** From da82ce57a45ac2f295415ed487b9aec051db4f7f Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 25 Jul 2011 16:22:34 +0530 Subject: [PATCH 16/24] ARM: OMAP4: Remove un-used do_wfi() macro. With OMAP4 suspend, idle and hotplug series, we no longer need do_wfi() macro. Remove the same. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/common.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index c078db1b3de8..3312174d64ba 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -158,17 +158,6 @@ void omap3_intc_resume_idle(void); void omap2_intc_handle_irq(struct pt_regs *regs); void omap3_intc_handle_irq(struct pt_regs *regs); -/* - * wfi used in low power code. Directly opcode is used instead - * of instruction to avoid mulit-omap build break - */ -#ifdef CONFIG_THUMB2_KERNEL -#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory") -#else -#define do_wfi() \ - __asm__ __volatile__ (".word 0xe320f003" : : : "memory") -#endif - #ifdef CONFIG_CACHE_L2X0 extern void __iomem *omap4_get_l2cache_base(void); #endif From 0f3cf2ec81aeb4747624954bae2cc8decc48e12f Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 16 Jun 2010 23:29:31 +0530 Subject: [PATCH 17/24] ARM: OMAP4: PM: Add WakeupGen and secure GIC low power support Add WakeupGen and secure GIC low power support to save and restore it's registers. WakeupGen Registers are saved to pre-defined SAR RAM layout and the restore is automatically done by hardware(ROM code) while coming out of MPUSS OSWR or Device off state. Secure GIC is saved using secure API and restored by hardware like WakeupGen. 
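The save is triggered from the CPU cluster PM notifier chain; other OMAP4
drivers that lose context in MPUSS OSWR or OFF can hook the same mechanism.
A minimal, hypothetical sketch using the generic <linux/cpu_pm.h> API
(the driver name and the save/restore hooks are placeholders, not part of
this patch):

    #include <linux/cpu_pm.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    static int my_ctx_cpu_pm_notify(struct notifier_block *nb,
                                    unsigned long cmd, void *v)
    {
            switch (cmd) {
            case CPU_CLUSTER_PM_ENTER:
                    /* cluster context (GIC/WakeupGen) is about to be lost:
                     * save any driver state that lives in that domain */
                    break;
            case CPU_CLUSTER_PM_EXIT:
                    /* cluster woke up: restore whatever ROM code does not */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_ctx_cpu_pm_nb = {
            .notifier_call = my_ctx_cpu_pm_notify,
    };

    static int __init my_ctx_init(void)
    {
            return cpu_pm_register_notifier(&my_ctx_cpu_pm_nb);
    }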
Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/omap-wakeupgen.c | 131 +++++++++++++++++++++++++ arch/arm/mach-omap2/omap4-sar-layout.h | 15 +++ 2 files changed, 146 insertions(+) diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index 701dfecad64b..d3d8971d7f30 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -22,10 +22,16 @@ #include #include #include +#include +#include #include #include +#include + +#include "omap4-sar-layout.h" +#include "common.h" #define NR_REG_BANKS 4 #define MAX_IRQS 128 @@ -36,6 +42,7 @@ #define CPU1_ID 0x1 static void __iomem *wakeupgen_base; +static void __iomem *sar_base; static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks); static DEFINE_SPINLOCK(wakeupgen_lock); static unsigned int irq_target_cpu[NR_IRQS]; @@ -55,6 +62,11 @@ static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu) (cpu * CPU_ENA_OFFSET) + (idx * 4)); } +static inline void sar_writel(u32 val, u32 offset, u8 idx) +{ + __raw_writel(val, sar_base + offset + (idx * 4)); +} + static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg) { u8 i; @@ -180,6 +192,93 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set) spin_unlock_irqrestore(&wakeupgen_lock, flags); } +#ifdef CONFIG_CPU_PM +/* + * Save WakeupGen interrupt context in SAR BANK3. Restore is done by + * ROM code. WakeupGen IP is integrated along with GIC to manage the + * interrupt wakeups from CPU low power states. It manages + * masking/unmasking of Shared peripheral interrupts(SPI). So the + * interrupt enable/disable control should be in sync and consistent + * at WakeupGen and GIC so that interrupts are not lost. + */ +static void irq_save_context(void) +{ + u32 i, val; + + if (omap_rev() == OMAP4430_REV_ES1_0) + return; + + if (!sar_base) + sar_base = omap4_get_sar_ram_base(); + + for (i = 0; i < NR_REG_BANKS; i++) { + /* Save the CPUx interrupt mask for IRQ 0 to 127 */ + val = wakeupgen_readl(i, 0); + sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i); + val = wakeupgen_readl(i, 1); + sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i); + + /* + * Disable the secure interrupts for CPUx. The restore + * code blindly restores secure and non-secure interrupt + * masks from SAR RAM. Secure interrupts are not suppose + * to be enabled from HLOS. So overwrite the SAR location + * so that the secure interrupt remains disabled. 
+ */ + sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i); + sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i); + } + + /* Save AuxBoot* registers */ + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); + __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET); + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); + __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET); + + /* Save SyncReq generation logic */ + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); + __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET); + val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0); + __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET); + + /* Save SyncReq generation logic */ + val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK); + __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET); + val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN); + __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET); + + /* Set the Backup Bit Mask status */ + val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); + val |= SAR_BACKUP_STATUS_WAKEUPGEN; + __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); +} + +/* + * Clear WakeupGen SAR backup status. + */ +void irq_sar_clear(void) +{ + u32 val; + val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET); + val &= ~SAR_BACKUP_STATUS_WAKEUPGEN; + __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET); +} + +/* + * Save GIC and Wakeupgen interrupt context using secure API + * for HS/EMU devices. + */ +static void irq_save_secure_context(void) +{ + u32 ret; + ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX, + FLAG_START_CRITICAL, + 0, 0, 0, 0, 0); + if (ret != API_HAL_RET_VALUE_OK) + pr_err("GIC and Wakeupgen context save failed\n"); +} +#endif + #ifdef CONFIG_HOTPLUG_CPU static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -210,6 +309,37 @@ static void __init irq_hotplug_init(void) {} #endif +#ifdef CONFIG_CPU_PM +static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_CLUSTER_PM_ENTER: + if (omap_type() == OMAP2_DEVICE_TYPE_GP) + irq_save_context(); + else + irq_save_secure_context(); + break; + case CPU_CLUSTER_PM_EXIT: + if (omap_type() == OMAP2_DEVICE_TYPE_GP) + irq_sar_clear(); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block irq_notifier_block = { + .notifier_call = irq_notifier, +}; + +static void __init irq_pm_init(void) +{ + cpu_pm_register_notifier(&irq_notifier_block); +} +#else +static void __init irq_pm_init(void) +{} +#endif + /* * Initialise the wakeupgen module. 
*/ @@ -253,6 +383,7 @@ int __init omap_wakeupgen_init(void) irq_target_cpu[i] = boot_cpu; irq_hotplug_init(); + irq_pm_init(); return 0; } diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h index 970a2eef3ab9..aa14a8dd2505 100644 --- a/arch/arm/mach-omap2/omap4-sar-layout.h +++ b/arch/arm/mach-omap2/omap4-sar-layout.h @@ -28,4 +28,19 @@ #define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04 #define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08 +#define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500) +#define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504) +#define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508) + +/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */ +#define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684) +#define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694) +#define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4) +#define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4) +#define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4) +#define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8) +#define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc) +#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0) +#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10 + #endif From 5e94c6e33e7c4726ef09f46c267e9ca232c5148a Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 9 Jan 2011 02:59:09 +0530 Subject: [PATCH 18/24] ARM: OMAP4: PM: Add L2X0 cache lowpower support When MPUSS hits off-mode, L2 cache is lost. This patch adds L2X0 necessary maintenance operations and context restoration in the low power code. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- .../arm/mach-omap2/include/mach/omap-secure.h | 5 + arch/arm/mach-omap2/omap-mpuss-lowpower.c | 41 +++++++- arch/arm/mach-omap2/omap4-sar-layout.h | 4 + arch/arm/mach-omap2/sleep44xx.S | 95 +++++++++++++++++++ 4 files changed, 144 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/include/mach/omap-secure.h b/arch/arm/mach-omap2/include/mach/omap-secure.h index 5f0763dd5664..c90a43589abe 100644 --- a/arch/arm/mach-omap2/include/mach/omap-secure.h +++ b/arch/arm/mach-omap2/include/mach/omap-secure.h @@ -37,8 +37,13 @@ /* Secure Monitor mode APIs */ #define OMAP4_MON_SCU_PWR_INDEX 0x108 +#define OMAP4_MON_L2X0_DBG_CTRL_INDEX 0x100 +#define OMAP4_MON_L2X0_CTRL_INDEX 0x102 +#define OMAP4_MON_L2X0_AUXCTRL_INDEX 0x109 +#define OMAP4_MON_L2X0_PREFETCH_INDEX 0x113 /* Secure PPA(Primary Protected Application) APIs */ +#define OMAP4_PPA_L2_POR_INDEX 0x23 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25 #ifndef __ASSEMBLER__ diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index f9bb2b3d977b..907a048fe5e9 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -63,10 +64,12 @@ struct omap4_cpu_pm_info { struct powerdomain *pwrdm; void __iomem *scu_sar_addr; void __iomem *wkup_sar_addr; + void __iomem *l2x0_sar_addr; }; static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); static struct powerdomain *mpuss_pd; +static void __iomem *sar_base; /* * Program the wakeup routine address for the CPU0 and CPU1 @@ -135,6 +138,36 @@ static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) __raw_writel(scu_pwr_st, pm_info->scu_sar_addr); } +/* + * Store the CPU cluster state for L2X0 low power operations. 
+ */ +static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) +{ + struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); + + __raw_writel(save_state, pm_info->l2x0_sar_addr); +} + +/* + * Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to + * in every restore MPUSS OFF path. + */ +#ifdef CONFIG_CACHE_L2X0 +static void save_l2x0_context(void) +{ + u32 val; + void __iomem *l2x0_base = omap4_get_l2cache_base(); + + val = __raw_readl(l2x0_base + L2X0_AUX_CTRL); + __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET); + val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL); + __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET); +} +#else +static void save_l2x0_context(void) +{} +#endif + /** * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function * The purpose of this function is to manage low power programming @@ -182,6 +215,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) set_cpu_next_pwrst(cpu, power_state); set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); scu_pwrst_prepare(cpu, power_state); + l2x0_pwrst_prepare(cpu, save_state); /* * Call low level function with targeted low power state. @@ -239,17 +273,19 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) int __init omap4_mpuss_init(void) { struct omap4_cpu_pm_info *pm_info; - void __iomem *sar_base = omap4_get_sar_ram_base(); if (omap_rev() == OMAP4430_REV_ES1_0) { WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); return -ENODEV; } + sar_base = omap4_get_sar_ram_base(); + /* Initilaise per CPU PM information */ pm_info = &per_cpu(omap4_pm_info, 0x0); pm_info->scu_sar_addr = sar_base + SCU_OFFSET0; pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET; + pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0; pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm"); if (!pm_info->pwrdm) { pr_err("Lookup failed for CPU0 pwrdm\n"); @@ -265,6 +301,7 @@ int __init omap4_mpuss_init(void) pm_info = &per_cpu(omap4_pm_info, 0x1); pm_info->scu_sar_addr = sar_base + SCU_OFFSET1; pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; + pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1; pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm"); if (!pm_info->pwrdm) { pr_err("Lookup failed for CPU1 pwrdm\n"); @@ -290,6 +327,8 @@ int __init omap4_mpuss_init(void) else __raw_writel(0, sar_base + OMAP_TYPE_OFFSET); + save_l2x0_context(); + return 0; } diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h index aa14a8dd2505..fe5b545ad443 100644 --- a/arch/arm/mach-omap2/omap4-sar-layout.h +++ b/arch/arm/mach-omap2/omap4-sar-layout.h @@ -23,6 +23,10 @@ #define SCU_OFFSET0 0xd00 #define SCU_OFFSET1 0xd04 #define OMAP_TYPE_OFFSET 0xd10 +#define L2X0_SAVE_OFFSET0 0xd14 +#define L2X0_SAVE_OFFSET1 0xd18 +#define L2X0_AUXCTRL_OFFSET 0xd1c +#define L2X0_PREFETCH_CTRL_OFFSET 0xd20 /* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */ #define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04 diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index e5521945ba8e..3154b63def35 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -32,6 +32,9 @@ ppa_zero_params: .word 0x0 +ppa_por_params: + .word 1, 0 + /* * ============================= * == CPU suspend finisher == @@ -132,6 +135,54 @@ skip_scu_gp_set: mcrne p15, 0, r0, c1, c0, 1 isb dsb +#ifdef CONFIG_CACHE_L2X0 + /* + * Clean and invalidate the L2 cache. 
+ * Common cache-l2x0.c functions can't be used here since it + * uses spinlocks. We are out of coherency here with data cache + * disabled. The spinlock implementation uses exclusive load/store + * instruction which can fail without data cache being enabled. + * OMAP4 hardware doesn't support exclusive monitor which can + * overcome exclusive access issue. Because of this, CPU can + * lead to deadlock. + */ + bl omap4_get_sar_ram_base + mov r8, r0 + mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR + ands r5, r5, #0x0f + ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR + ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory. + cmp r0, #3 + bne do_WFI +#ifdef CONFIG_PL310_ERRATA_727915 + mov r0, #0x03 + mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX + DO_SMC +#endif + bl omap4_get_l2cache_base + mov r2, r0 + ldr r0, =0xffff + str r0, [r2, #L2X0_CLEAN_INV_WAY] +wait: + ldr r0, [r2, #L2X0_CLEAN_INV_WAY] + ldr r1, =0xffff + ands r0, r0, r1 + bne wait +#ifdef CONFIG_PL310_ERRATA_727915 + mov r0, #0x00 + mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX + DO_SMC +#endif +l2x_sync: + bl omap4_get_l2cache_base + mov r2, r0 + mov r0, #0x0 + str r0, [r2, #L2X0_CACHE_SYNC] +sync: + ldr r0, [r2, #L2X0_CACHE_SYNC] + ands r0, r0, #0x1 + bne sync +#endif do_WFI: bl omap_do_wfi @@ -225,6 +276,50 @@ enable_smp_bit: mcreq p15, 0, r0, c1, c0, 1 isb skip_ns_smp_enable: +#ifdef CONFIG_CACHE_L2X0 + /* + * Restore the L2 AUXCTRL and enable the L2 cache. + * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL + * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL + * register r0 contains value to be programmed. + * L2 cache is already invalidate by ROM code as part + * of MPUSS OFF wakeup path. + */ + ldr r2, =OMAP44XX_L2CACHE_BASE + ldr r0, [r2, #L2X0_CTRL] + and r0, #0x0f + cmp r0, #1 + beq skip_l2en @ Skip if already enabled + ldr r3, =OMAP44XX_SAR_RAM_BASE + ldr r1, [r3, #OMAP_TYPE_OFFSET] + cmp r1, #0x1 @ Check for HS device + bne set_gp_por + ldr r0, =OMAP4_PPA_L2_POR_INDEX + ldr r1, =OMAP44XX_SAR_RAM_BASE + ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET] + adr r3, ppa_por_params + str r4, [r3, #0x04] + mov r1, #0x0 @ Process ID + mov r2, #0x4 @ Flag + mov r6, #0xff + mov r12, #0x00 @ Secure Service ID + DO_SMC + b set_aux_ctrl +set_gp_por: + ldr r1, =OMAP44XX_SAR_RAM_BASE + ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET] + ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH + DO_SMC +set_aux_ctrl: + ldr r1, =OMAP44XX_SAR_RAM_BASE + ldr r0, [r1, #L2X0_AUXCTRL_OFFSET] + ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL + DO_SMC + mov r0, #0x1 + ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache + DO_SMC +skip_l2en: +#endif b cpu_resume @ Jump to generic resume ENDPROC(omap4_cpu_resume) From 3ba2a7393e6be48ad7f545a743cd6f46325ba8fd Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 6 Jun 2011 14:33:29 +0530 Subject: [PATCH 19/24] ARM: OMAP4: PM: Add MPUSS power domain OSWR support This patch adds the MPUSS OSWR (Open Switch Retention) support. The MPUSS OSWR configuration is as below. - CPUx L1 and logic lost, MPUSS logic lost, L2 memory is retained OMAP4460 onwards, MPUSS power domain doesn't support OFF state any more anymore just like CORE power domain. The deepest state supported is OSWR. On OMAP4430 secure devices too, MPUSS off mode can't be used because of a bug which alters Ducati and Tesla states. Hence MPUSS off mode as an independent state isn't supported on OMAP44XX devices. 
Ofcourse when MPUSS power domain transitions to OSWR along with device off mode, it eventually hits off state since memory contents are lost. Hence the MPUSS off mode independent state is not attempted without device off mode. All the necessary infrastructure code for MPUSS off mode is in place as part of this series. Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/common.h | 6 +++ arch/arm/mach-omap2/omap-mpuss-lowpower.c | 65 +++++++++++++++++++++-- arch/arm/mach-omap2/pm44xx.c | 4 ++ 3 files changed, 72 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 3312174d64ba..0911e843f079 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -190,6 +190,7 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); extern int omap4_finish_suspend(unsigned long cpu_state); extern void omap4_cpu_resume(void); extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); +extern u32 omap4_mpuss_read_prev_context_state(void); #else static inline int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) @@ -216,6 +217,11 @@ static inline int omap4_finish_suspend(unsigned long cpu_state) static inline void omap4_cpu_resume(void) {} + +static inline u32 omap4_mpuss_read_prev_context_state(void) +{ + return 0; +} #endif #endif /* __ASSEMBLER__ */ #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 907a048fe5e9..549aff1bfecb 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -24,8 +24,8 @@ * ON ON ON * ON(Inactive) OFF ON(Inactive) * OFF OFF CSWR - * OFF OFF OSWR (*TBD) - * OFF OFF OFF* (*TBD) + * OFF OFF OSWR + * OFF OFF OFF(Device OFF *TBD) * ---------------------------------------------- * * Note: CPU0 is the master core and it is the last CPU to go down @@ -56,7 +56,11 @@ #include "common.h" #include "omap4-sar-layout.h" #include "pm.h" -#include "powerdomain.h" +#include "prcm_mpu44xx.h" +#include "prminst44xx.h" +#include "prcm44xx.h" +#include "prm44xx.h" +#include "prm-regbits-44xx.h" #ifdef CONFIG_SMP @@ -138,6 +142,48 @@ static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state) __raw_writel(scu_pwr_st, pm_info->scu_sar_addr); } +/* Helper functions for MPUSS OSWR */ +static inline void mpuss_clear_prev_logic_pwrst(void) +{ + u32 reg; + + reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, + OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); + omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION, + OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); +} + +static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) +{ + u32 reg; + + if (cpu_id) { + reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST, + OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); + omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST, + OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET); + } else { + reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST, + OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); + omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST, + OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET); + } +} + +/** + * omap4_mpuss_read_prev_context_state: + * Function returns the MPUSS previous context state + */ +u32 omap4_mpuss_read_prev_context_state(void) +{ + u32 reg; + + reg = 
omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, + OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET); + reg &= OMAP4430_LOSTCONTEXT_DFF_MASK; + return reg; +} + /* * Store the CPU cluster state for L2X0 low power operations. */ @@ -210,8 +256,18 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) return -ENXIO; } + /* + * Check MPUSS next state and save interrupt controller if needed. + * In MPUSS OSWR or device OFF, interrupt controller contest is lost. + */ + mpuss_clear_prev_logic_pwrst(); pwrdm_clear_all_prev_pwrst(mpuss_pd); + if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) && + (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF)) + save_state = 2; + clear_cpu_prev_pwrst(cpu); + cpu_clear_prev_logic_pwrst(cpu); set_cpu_next_pwrst(cpu, power_state); set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume)); scu_pwrst_prepare(cpu, power_state); @@ -294,6 +350,7 @@ int __init omap4_mpuss_init(void) /* Clear CPU previous power domain state */ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); + cpu_clear_prev_logic_pwrst(0); /* Initialise CPU0 power domain state to ON */ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); @@ -310,6 +367,7 @@ int __init omap4_mpuss_init(void) /* Clear CPU previous power domain state */ pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); + cpu_clear_prev_logic_pwrst(1); /* Initialise CPU1 power domain state to ON */ pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON); @@ -320,6 +378,7 @@ int __init omap4_mpuss_init(void) return -ENODEV; } pwrdm_clear_all_prev_pwrst(mpuss_pd); + mpuss_clear_prev_logic_pwrst(); /* Save device type on scratchpad for low level code to use */ if (omap_type() != OMAP2_DEVICE_TYPE_GP) diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 6dc9bbe0a4a8..92daae07d634 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -27,6 +27,7 @@ struct power_state { u32 next_state; #ifdef CONFIG_SUSPEND u32 saved_state; + u32 saved_logic_state; #endif struct list_head node; }; @@ -43,11 +44,13 @@ static int omap4_pm_suspend(void) /* Save current powerdomain state */ list_for_each_entry(pwrst, &pwrst_list, node) { pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm); + pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm); } /* Set targeted power domain states by suspend */ list_for_each_entry(pwrst, &pwrst_list, node) { omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state); + pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF); } /* @@ -71,6 +74,7 @@ static int omap4_pm_suspend(void) ret = -1; } omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state); + pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state); } if (ret) pr_crit("Could not enter target state in pm_suspend\n"); From 49404dd09f5dc78c247c6044c60d7be7768a71bc Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Mon, 10 Jan 2011 01:02:15 +0530 Subject: [PATCH 20/24] ARM: OMAP4: PM: Add power domain statistics support Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 549aff1bfecb..1d5d01056558 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -256,6 +256,8 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) return -ENXIO; } + 
pwrdm_pre_transition(); + /* * Check MPUSS next state and save interrupt controller if needed. * In MPUSS OSWR or device OFF, interrupt controller contest is lost. @@ -288,6 +290,8 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) wakeup_cpu = smp_processor_id(); set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON); + pwrdm_post_transition(); + return 0; } From 137d105d50f6e6c373c1aa759f59045e6239cf66 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 25 Jun 2011 18:04:31 -0700 Subject: [PATCH 21/24] ARM: OMAP4: Fix errata i688 with MPU interconnect barriers. On OMAP4 SOC, intecronnects has many write buffers in the async bridges and they need to be drained before CPU enters into standby state. Patch 'OMAP4: PM: Add CPUX OFF mode support' added CPU PM support but OMAP errata i688 (Async Bridge Corruption) needs to be taken care to avoid issues like system freeze, CPU deadlocks, random crashes with register accesses, synchronisation loss on initiators operating on both interconnect port simultaneously. As per the errata, if a data is stalled inside asynchronous bridge because of back pressure, it may be accepted multiple times, creating pointer misalignment that will corrupt next transfers on that data path until next reset of the system (No recovery procedure once the issue is hit, the path remains consistently broken). Async bridge can be found on path between MPU to EMIF and MPU to L3 interconnect. This situation can happen only when the idle is initiated by a Master Request Disconnection (which is trigged by software when executing WFI on CPU). The work-around for this errata needs all the initiators connected through async bridge must ensure that data path is properly drained before issuing WFI. This condition will be met if one Strongly ordered access is performed to the target right before executing the WFI. In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained. IO barrier ensure that there is no synchronisation loss on initiators operating on both interconnect port simultaneously. Thanks to Russell for a tip to conver assembly function to C fuction there by reducing 40 odd lines of code from the patch. Signed-off-by: Santosh Shilimkar Signed-off-by: Richard Woodruff Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/Kconfig | 21 +++++++++ arch/arm/mach-omap2/include/mach/barriers.h | 31 +++++++++++++ arch/arm/mach-omap2/io.c | 9 ++++ arch/arm/mach-omap2/omap4-common.c | 51 +++++++++++++++++++++ arch/arm/mach-omap2/sleep44xx.S | 8 ++++ arch/arm/plat-omap/include/plat/sram.h | 6 ++- arch/arm/plat-omap/sram.c | 8 ++++ 7 files changed, 133 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-omap2/include/mach/barriers.h diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index b6625130831d..50f43942c1aa 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -353,6 +353,27 @@ config OMAP3_SDRC_AC_TIMING wish to say no. 
Selecting yes without understanding what is going on could result in system crashes; +config OMAP4_ERRATA_I688 + bool "OMAP4 errata: Async Bridge Corruption" + depends on ARCH_OMAP4 + select ARCH_HAS_BARRIERS + help + If a data is stalled inside asynchronous bridge because of back + pressure, it may be accepted multiple times, creating pointer + misalignment that will corrupt next transfers on that data path + until next reset of the system (No recovery procedure once the + issue is hit, the path remains consistently broken). Async bridge + can be found on path between MPU to EMIF and MPU to L3 interconnect. + This situation can happen only when the idle is initiated by a + Master Request Disconnection (which is trigged by software when + executing WFI on CPU). + The work-around for this errata needs all the initiators connected + through async bridge must ensure that data path is properly drained + before issuing WFI. This condition will be met if one Strongly ordered + access is performed to the target right before executing the WFI. + In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained. + IO barrier ensure that there is no synchronisation loss on initiators + operating on both interconnect port simultaneously. endmenu endif diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h new file mode 100644 index 000000000000..4fa72c7cc7cd --- /dev/null +++ b/arch/arm/mach-omap2/include/mach/barriers.h @@ -0,0 +1,31 @@ +/* + * OMAP memory barrier header. + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * Richard Woodruff + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __MACH_BARRIERS_H +#define __MACH_BARRIERS_H + +extern void omap_bus_sync(void); + +#define rmb() dsb() +#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0) +#define mb() wmb() + +#endif /* __MACH_BARRIERS_H */ diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 3f565dd2ea8d..65843390e7f0 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -237,6 +237,15 @@ static struct map_desc omap44xx_io_desc[] __initdata = { .length = L4_EMU_44XX_SIZE, .type = MT_DEVICE, }, +#ifdef CONFIG_OMAP4_ERRATA_I688 + { + .virtual = OMAP4_SRAM_VA, + .pfn = __phys_to_pfn(OMAP4_SRAM_PA), + .length = PAGE_SIZE, + .type = MT_MEMORY_SO, + }, +#endif + }; #endif diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 1b93d31fe8e9..bc16c818c6b7 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -15,11 +15,14 @@ #include #include #include +#include #include #include +#include #include +#include #include #include @@ -33,6 +36,54 @@ static void __iomem *l2cache_base; static void __iomem *sar_ram_base; +#ifdef CONFIG_OMAP4_ERRATA_I688 +/* Used to implement memory barrier on DRAM path */ +#define OMAP4_DRAM_BARRIER_VA 0xfe600000 + +void __iomem *dram_sync, *sram_sync; + +void omap_bus_sync(void) +{ + if (dram_sync && sram_sync) { + writel_relaxed(readl_relaxed(dram_sync), dram_sync); + writel_relaxed(readl_relaxed(sram_sync), sram_sync); + isb(); + } +} + +static int __init omap_barriers_init(void) +{ + struct map_desc dram_io_desc[1]; + phys_addr_t paddr; + u32 size; + + if (!cpu_is_omap44xx()) + return -ENODEV; + + size = ALIGN(PAGE_SIZE, SZ_1M); + paddr = memblock_alloc(size, SZ_1M); + if (!paddr) { + pr_err("%s: failed to reserve 4 Kbytes\n", __func__); + return -ENOMEM; + } + memblock_free(paddr, size); + memblock_remove(paddr, size); + dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; + dram_io_desc[0].pfn = __phys_to_pfn(paddr); + dram_io_desc[0].length = size; + dram_io_desc[0].type = MT_MEMORY_SO; + iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc)); + dram_sync = (void __iomem *) dram_io_desc[0].virtual; + sram_sync = (void __iomem *) OMAP4_SRAM_VA; + + pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n", + (long long) paddr, dram_io_desc[0].virtual); + + return 0; +} +core_initcall(omap_barriers_init); +#endif + void __init gic_init_irq(void) { void __iomem *omap_irq_base; diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 3154b63def35..abd283400490 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -325,8 +325,16 @@ skip_l2en: ENDPROC(omap4_cpu_resume) #endif +#ifndef CONFIG_OMAP4_ERRATA_I688 +ENTRY(omap_bus_sync) + mov pc, lr +ENDPROC(omap_bus_sync) +#endif + ENTRY(omap_do_wfi) stmfd sp!, {lr} + /* Drain interconnect write buffers. 
*/ + bl omap_bus_sync /* * Execute an ISB instruction to ensure that all of the diff --git a/arch/arm/plat-omap/include/plat/sram.h b/arch/arm/plat-omap/include/plat/sram.h index f500fc34d065..75aa1b2bef51 100644 --- a/arch/arm/plat-omap/include/plat/sram.h +++ b/arch/arm/plat-omap/include/plat/sram.h @@ -95,6 +95,10 @@ static inline void omap_push_sram_idle(void) {} */ #define OMAP2_SRAM_PA 0x40200000 #define OMAP3_SRAM_PA 0x40200000 +#ifdef CONFIG_OMAP4_ERRATA_I688 +#define OMAP4_SRAM_PA 0x40304000 +#define OMAP4_SRAM_VA 0xfe404000 +#else #define OMAP4_SRAM_PA 0x40300000 - +#endif #endif diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 8b28664d1c62..ad6a71a00cef 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -40,7 +40,11 @@ #define OMAP1_SRAM_PA 0x20000000 #define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800) #define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000) +#ifdef CONFIG_OMAP4_ERRATA_I688 +#define OMAP4_SRAM_PUB_PA OMAP4_SRAM_PA +#else #define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000) +#endif #if defined(CONFIG_ARCH_OMAP2PLUS) #define SRAM_BOOTLOADER_SZ 0x00 @@ -163,6 +167,10 @@ static void __init omap_map_sram(void) if (omap_sram_size == 0) return; +#ifdef CONFIG_OMAP4_ERRATA_I688 + omap_sram_start += PAGE_SIZE; + omap_sram_size -= SZ_16K; +#endif if (cpu_is_omap34xx()) { /* * SRAM must be marked as non-cached on OMAP3 since the From 98272660970a71e21ad1992f695f75b780de833c Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Tue, 16 Aug 2011 17:31:40 +0530 Subject: [PATCH 22/24] ARM: OMAP4: PM: Add CPUidle support Add OMAP4 CPUIDLE support. CPU1 is left with defualt idle and the low power state for it is managed via cpu-hotplug. This patch adds MPUSS low power states in cpuidle. C1 - CPU0 ON + CPU1 ON + MPU ON C2 - CPU0 OFF + CPU1 OFF + MPU CSWR C3 - CPU0 OFF + CPU1 OFF + MPU OSWR OMAP4460 onwards, MPUSS power domain doesn't support OFF state any more anymore just like CORE power domain. The deepest state supported is OSWr. Ofcourse when MPUSS and CORE PD transitions to OSWR along with device off mode, even the memory contemts are lost which is as good as the PD off state. On OMAP4 because of hardware constraints, no low power states are targeted when both CPUs are online and in SMP mode. The low power states are attempted only when secondary CPU gets offline to OFF through hotplug infrastructure. Thanks to Nicole Chalhoub for doing exhaustive C-state latency profiling. 
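The one policy decision worth calling out is that CPU0 never attempts C2/C3
while CPU1 is still online; such requests are demoted to the safe C1 state.
A reduced sketch of that check (illustrative only, using the powerdomain API
and the driver's safe_state_index as in the patch below):

    /* Demote to C1 unless CPU1's power domain has already reached OFF. */
    static int pick_effective_cstate(struct cpuidle_driver *drv, int requested)
    {
            if (pwrdm_read_pwrst(cpu1_pd) != PWRDM_POWER_OFF)
                    return drv->safe_state_index;   /* C1: MPUSS stays ON */

            return requested;                       /* C2/C3 may be entered */
    }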
Signed-off-by: Rajendra Nayak Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/Makefile | 3 +- arch/arm/mach-omap2/cpuidle44xx.c | 237 ++++++++++++++++++++++++++++++ arch/arm/mach-omap2/pm.h | 1 + arch/arm/mach-omap2/pm44xx.c | 2 + 4 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 arch/arm/mach-omap2/cpuidle44xx.c diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 58de1f6df27c..9a6da52661ce 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -65,7 +65,8 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ cpuidle34xx.o -obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o +obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \ + cpuidle44xx.o obj-$(CONFIG_PM_DEBUG) += pm-debug.o obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c new file mode 100644 index 000000000000..81386c6256eb --- /dev/null +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -0,0 +1,237 @@ +/* + * OMAP4 CPU idle Routines + * + * Copyright (C) 2011 Texas Instruments, Inc. + * Santosh Shilimkar + * Rajendra Nayak + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include + +#include "common.h" +#include "pm.h" +#include "prm.h" + +#ifdef CONFIG_CPU_IDLE + +/* Machine specific information to be recorded in the C-state driver_data */ +struct omap4_idle_statedata { + u32 cpu_state; + u32 mpu_logic_state; + u32 mpu_state; + u8 valid; +}; + +static struct cpuidle_params cpuidle_params_table[] = { + /* C1 - CPU0 ON + CPU1 ON + MPU ON */ + {.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1}, + /* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */ + {.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1}, + /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ + {.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1}, +}; + +#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table) + +struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES]; +static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; + +/** + * omap4_enter_idle - Programs OMAP4 to enter the specified state + * @dev: cpuidle device + * @drv: cpuidle driver + * @index: the index of state to be entered + * + * Called from the CPUidle framework to program the device to the + * specified low power state selected by the governor. + * Returns the amount of time spent in the low power state. + */ +static int omap4_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + struct omap4_idle_statedata *cx = + cpuidle_get_statedata(&dev->states_usage[index]); + struct timespec ts_preidle, ts_postidle, ts_idle; + u32 cpu1_state; + int idle_time; + int new_state_idx; + + /* Used to keep track of the total time in idle */ + getnstimeofday(&ts_preidle); + + local_irq_disable(); + local_fiq_disable(); + + /* + * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state. + * This is necessary to honour hardware recommondation + * of triggeing all the possible low power modes once CPU1 is + * out of coherency and in OFF mode. 
+ * Update dev->last_state so that governor stats reflects right + * data. + */ + cpu1_state = pwrdm_read_pwrst(cpu1_pd); + if (cpu1_state != PWRDM_POWER_OFF) { + new_state_idx = drv->safe_state_index; + cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]); + } + + /* + * Call idle CPU PM enter notifier chain so that + * VFP and per CPU interrupt context is saved. + */ + if (cx->cpu_state == PWRDM_POWER_OFF) + cpu_pm_enter(); + + pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); + omap_set_pwrdm_state(mpu_pd, cx->mpu_state); + + /* + * Call idle CPU cluster PM enter notifier chain + * to save GIC and wakeupgen context. + */ + if ((cx->mpu_state == PWRDM_POWER_RET) && + (cx->mpu_logic_state == PWRDM_POWER_OFF)) + cpu_cluster_pm_enter(); + + omap4_enter_lowpower(dev->cpu, cx->cpu_state); + + /* + * Call idle CPU PM exit notifier chain to restore + * VFP and per CPU IRQ context. Only CPU0 state is + * considered since CPU1 is managed by CPU hotplug. + */ + if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF) + cpu_pm_exit(); + + /* + * Call idle CPU cluster PM exit notifier chain + * to restore GIC and wakeupgen context. + */ + if (omap4_mpuss_read_prev_context_state()) + cpu_cluster_pm_exit(); + + getnstimeofday(&ts_postidle); + ts_idle = timespec_sub(ts_postidle, ts_preidle); + + local_irq_enable(); + local_fiq_enable(); + + idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \ + USEC_PER_SEC; + + /* Update cpuidle counters */ + dev->last_residency = idle_time; + + return index; +} + +DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev); + +struct cpuidle_driver omap4_idle_driver = { + .name = "omap4_idle", + .owner = THIS_MODULE, +}; + +static inline void _fill_cstate(struct cpuidle_driver *drv, + int idx, const char *descr) +{ + struct cpuidle_state *state = &drv->states[idx]; + + state->exit_latency = cpuidle_params_table[idx].exit_latency; + state->target_residency = cpuidle_params_table[idx].target_residency; + state->flags = CPUIDLE_FLAG_TIME_VALID; + state->enter = omap4_enter_idle; + sprintf(state->name, "C%d", idx + 1); + strncpy(state->desc, descr, CPUIDLE_DESC_LEN); +} + +static inline struct omap4_idle_statedata *_fill_cstate_usage( + struct cpuidle_device *dev, + int idx) +{ + struct omap4_idle_statedata *cx = &omap4_idle_data[idx]; + struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; + + cx->valid = cpuidle_params_table[idx].valid; + cpuidle_set_statedata(state_usage, cx); + + return cx; +} + + + +/** + * omap4_idle_init - Init routine for OMAP4 idle + * + * Registers the OMAP4 specific cpuidle driver to the cpuidle + * framework with the valid set of states. 
+ */ +int __init omap4_idle_init(void) +{ + struct omap4_idle_statedata *cx; + struct cpuidle_device *dev; + struct cpuidle_driver *drv = &omap4_idle_driver; + unsigned int cpu_id = 0; + + mpu_pd = pwrdm_lookup("mpu_pwrdm"); + cpu0_pd = pwrdm_lookup("cpu0_pwrdm"); + cpu1_pd = pwrdm_lookup("cpu1_pwrdm"); + if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd)) + return -ENODEV; + + + drv->safe_state_index = -1; + dev = &per_cpu(omap4_idle_dev, cpu_id); + dev->cpu = cpu_id; + + /* C1 - CPU0 ON + CPU1 ON + MPU ON */ + _fill_cstate(drv, 0, "MPUSS ON"); + drv->safe_state_index = 0; + cx = _fill_cstate_usage(dev, 0); + cx->valid = 1; /* C1 is always valid */ + cx->cpu_state = PWRDM_POWER_ON; + cx->mpu_state = PWRDM_POWER_ON; + cx->mpu_logic_state = PWRDM_POWER_RET; + + /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ + _fill_cstate(drv, 1, "MPUSS CSWR"); + cx = _fill_cstate_usage(dev, 1); + cx->cpu_state = PWRDM_POWER_OFF; + cx->mpu_state = PWRDM_POWER_RET; + cx->mpu_logic_state = PWRDM_POWER_RET; + + /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ + _fill_cstate(drv, 2, "MPUSS OSWR"); + cx = _fill_cstate_usage(dev, 2); + cx->cpu_state = PWRDM_POWER_OFF; + cx->mpu_state = PWRDM_POWER_RET; + cx->mpu_logic_state = PWRDM_POWER_OFF; + + drv->state_count = OMAP4_NUM_STATES; + cpuidle_register_driver(&omap4_idle_driver); + + dev->state_count = OMAP4_NUM_STATES; + if (cpuidle_register_device(dev)) { + pr_err("%s: CPUidle register device failed\n", __func__); + return -EIO; + } + + return 0; +} +#else +int __init omap4_idle_init(void) +{ + return 0; +} +#endif /* CONFIG_CPU_IDLE */ diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 4e166add2f35..b737b11e4499 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h @@ -21,6 +21,7 @@ extern void omap_sram_idle(void); extern int omap3_can_sleep(void); extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state); extern int omap3_idle_init(void); +extern int omap4_idle_init(void); #if defined(CONFIG_PM_OPP) extern int omap3_opp_init(void); diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 92daae07d634..c264ef7219c1 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c @@ -258,6 +258,8 @@ static int __init omap4_pm_init(void) /* Overwrite the default arch_idle() */ pm_idle = omap_default_idle; + omap4_idle_init(); + err2: return ret; } From 98be0dde1957a1e47d42cf2c220bf52bacf81d6e Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sun, 16 Jan 2011 00:42:31 +0530 Subject: [PATCH 23/24] ARM: OMAP4: cpuidle: Switch to gptimer from twd in deeper C-states. CPU local timer(TWD) stops when the CPU is transitioning into deeper C-States. Since these timers are not wakeup capable, we need the wakeup capable global timer to program the wakeup time depending on the next timer expiry. It can be handled by registering a global wakeup capable timer along with local timers marked with (mis)feature flag CLOCK_EVT_FEAT_C3STOP. Then notify the clock events layer from idle code using CLOCK_EVT_NOTIFY_BROADCAST_ENTER/EXIT). ARM local timers are already marked with C3STOP feature. Add the notifiers to OMAP4 CPU idle code for the broadcast entry and exit. 
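The resulting ordering around a deep C-state entry is sketched below
(illustrative only; it assumes the clockevents_notify() broadcast interface
available at the time of this series):

    if (index > 0)          /* C2/C3: the local TWD will stop counting */
            clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

    omap4_enter_lowpower(dev->cpu, cx->cpu_state);

    if (index > 0)          /* hand timekeeping back from the global timer */
            clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);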
Signed-off-by: Santosh Shilimkar Acked-by: Jean Pihet Acked-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/cpuidle44xx.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 81386c6256eb..cfdbb86bc84e 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -65,6 +66,7 @@ static int omap4_enter_idle(struct cpuidle_device *dev, u32 cpu1_state; int idle_time; int new_state_idx; + int cpu_id = smp_processor_id(); /* Used to keep track of the total time in idle */ getnstimeofday(&ts_preidle); @@ -86,6 +88,9 @@ static int omap4_enter_idle(struct cpuidle_device *dev, cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]); } + if (index > 0) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. @@ -121,6 +126,9 @@ static int omap4_enter_idle(struct cpuidle_device *dev, if (omap4_mpuss_read_prev_context_state()) cpu_cluster_pm_exit(); + if (index > 0) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + getnstimeofday(&ts_postidle); ts_idle = timespec_sub(ts_postidle, ts_preidle); From ff819da44258ca12b9f60dfd589884106e5a3129 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Sat, 3 Sep 2011 22:38:27 +0530 Subject: [PATCH 24/24] ARM: OMAP3: CPUidle: Make use of CPU PM notifiers Save VFP CPU context using CPU PM notifier chain. VFP context is lost when CPU hits OFF state. Signed-off-by: Santosh Shilimkar Reviewed-by: Kevin Hilman Tested-by: Vishwanath BS Signed-off-by: Kevin Hilman --- arch/arm/mach-omap2/cpuidle34xx.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index e20332f4abdc..1f71ebb6c12c 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -124,9 +125,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev, pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); } + /* + * Call idle CPU PM enter notifier chain so that + * VFP context is saved. + */ + if (mpu_state == PWRDM_POWER_OFF) + cpu_pm_enter(); + /* Execute ARM wfi */ omap_sram_idle(); + /* + * Call idle CPU PM enter notifier chain to restore + * VFP context. + */ + if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF) + cpu_pm_exit(); + /* Re-allow idle for C1 */ if (index == 0) { pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);