/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "iomap.h"
#include "cm33xx.h"
#include "pm-asm-offsets.h"
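/*
 * MODULEMODE and module idle-status values for the CM_*_CLKCTRL
 * registers programmed below.
 */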
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED		0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE		0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE		0x0002
/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)			(1 << (nr))
.arm
	.arch armv7-a
.align 3
ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
/* Save wfi_flags arg to data space */
	mov	r4, r0
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	str	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]
	/* Only flush cache if we know we are losing MPU context */
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_flush
	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
isb
	/*
	 * Invalidate L1 and L2 data cache.
	 */
	ldr	r1, kernel_flush
	blx	r1
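	/*
	 * Reload wfi_flags from SRAM: the cache flush routine called
	 * above may clobber r4.
	 */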
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	ldr	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
/* Check if we want self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_enter_sr
	adr	r9, am33xx_emif_sram_table
	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3
emif_skip_enter_sr:
/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SAVE_EMIF
	beq	emif_skip_save
	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3
emif_skip_save:
	/* Can only disable EMIF if we have entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_disable
/* Disable EMIF */
	ldr	r1, virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	ldr	r1, virt_emif_clkctrl
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable
emif_skip_disable:
	tst	r4, #WFI_FLAG_WAKE_M3
	beq	wkup_m3_skip
	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]
wkup_m3_skip:
	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
isb
	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
dsb
dmb
	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can speculatively
	 * prefetch the instructions so add NOPs after WFI. Thirteen
	 * NOPs as per Cortex-A8 pipeline.
	 */
wfi
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
nop
/* We come here in case of an abort due to a late interrupt */
/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable
/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_exit_sr_abt
	adr	r9, am33xx_emif_sram_table
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1
emif_skip_exit_sr_abt:
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_restore
	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
isb
cache_skip_restore:
/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)
.align
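/*
 * Offset of the resume code below from the start of am33xx_do_wfi,
 * exported so the PM code can locate it in the SRAM copy.
 */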
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi
ENTRY(am33xx_resume_from_deep_sleep)
/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
wait_emif_enable1:
	ldr	r2, [r0]
	cmp	r1, r2
	bne	wait_emif_enable1

	adr	r9, am33xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1
resume_to_ddr:
/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)
/*
 * Local variables
 */
.align
kernel_flush:
.word v7_flush_dcache_all
virt_mpu_clkctrl:
.word AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
.word AM33XX_CM_PER_EMIF_CLKCTRL
phys_emif_clkctrl:
	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
		AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)
.align 3
/* DDR related defines */
am33xx_emif_sram_table:
.space EMIF_PM_FUNCTIONS_SIZE
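/*
 * Table of addresses handed to the pm33xx code so this block can be
 * copied to SRAM and its pieces located after relocation.
 */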
ENTRY(am33xx_pm_sram)
.word am33xx_do_wfi
.word am33xx_do_wfi_sz
.word am33xx_resume_offset
.word am33xx_emif_sram_table
.word am33xx_pm_ro_sram_data
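/* Physical address of cpu_resume, assuming the AM33xx DDR base of 0x80000000 */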
resume_addr:
	.word	cpu_resume - PAGE_OFFSET + 0x80000000
.align 3
ENTRY(am33xx_pm_ro_sram_data)
.space AMX3_PM_RO_SRAM_DATA_SIZE
ENTRY(am33xx_do_wfi_sz)
	.word	. - am33xx_do_wfi