/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM43XX SoCs
 *
 * Copyright (C) 2013-2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>
#include <linux/platform_data/pm33xx.h>
#include <asm/assembler.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/memory.h>

#include "cm33xx.h"
#include "common.h"
#include "iomap.h"
#include "omap-secure.h"
#include "omap44xx.h"
/*
 * NOTE(review): the text below is commit-message/changelog prose that was
 * pasted into the source file.  It is preserved here as a comment (it is
 * provenance information, not program text) so the file stays valid assembly:
 *
 * ARM: OMAP2+: move platform-specific asm-offset.h to arch/arm/mach-omap2
 *
 * <generated/ti-pm-asm-offsets.h> is only generated and included by
 * arch/arm/mach-omap2/, so it does not need to reside in the globally
 * visible include/generated/.
 *
 * I renamed it to arch/arm/mach-omap2/pm-asm-offsets.h since the prefix
 * 'ti-' is just redundant in mach-omap2/.
 *
 * My main motivation of this change is to avoid the race condition for
 * the parallel build (-j) when CONFIG_IKHEADERS is enabled.
 * When it is enabled, all the headers under include/ are archived into
 * kernel/kheaders_data.tar.xz and exposed in the sysfs.
 * In the parallel build, we have no idea in which order files are built.
 *
 *  - If ti-pm-asm-offsets.h is built before kheaders_data.tar.xz,
 *    the header will be included in the archive. Probably nobody will
 *    use it, but it is harmless except that it will increase the archive
 *    size needlessly.
 *
 *  - If kheaders_data.tar.xz is built before ti-pm-asm-offsets.h,
 *    the header will not be included in the archive. However, in the next
 *    build, the archive will be re-generated to include the newly-found
 *    ti-pm-asm-offsets.h. This is not nice from the build system point
 *    of view.
 *
 *  - If ti-pm-asm-offsets.h and kheaders_data.tar.xz are built at the
 *    same time, the corrupted header might be included in the archive,
 *    which does not look nice either.
 *
 * This commit fixes the race.
 *
 * Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
 * Tested-by: Keerthy <j-keerthy@ti.com>
 * Signed-off-by: Tony Lindgren <tony@atomide.com>
 */
#include "pm-asm-offsets.h"
#include "prm33xx.h"
#include "prcm43xx.h"

/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)			(1 << (nr))

/* CM_CLKCTRL MODULEMODE / IDLEST field values shared with AM33xx */
#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED		0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE		0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE		0x0002

/* Values written to the PRM EMIF control register to gate/ungate the EMIF */
#define AM43XX_EMIF_POWEROFF_ENABLE			0x1
#define AM43XX_EMIF_POWEROFF_DISABLE			0x0

/* CLKSTCTRL.CLKTRCTRL transition-control encodings */
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP		0x1
#define AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO		0x3

#define AM43XX_CM_BASE					0x44DF0000

/* Virtual address of a CM register: L4_WKUP mapping of base + inst + reg */
#define AM43XX_CM_REGADDR(inst, reg)				\
	AM33XX_L4_WK_IO_ADDRESS(AM43XX_CM_BASE + (inst) + (reg))

#define AM43XX_CM_MPU_CLKSTCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
					AM43XX_CM_MPU_MPU_CDOFFS)
#define AM43XX_CM_MPU_MPU_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_MPU_INST, \
					AM43XX_CM_MPU_MPU_CLKCTRL_OFFSET)
#define AM43XX_CM_PER_EMIF_CLKCTRL AM43XX_CM_REGADDR(AM43XX_CM_PER_INST, \
					AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)
#define AM43XX_PRM_EMIF_CTRL_OFFSET			0x0030

/* RTC-SS register offsets and PMIC_REG bit masks used for RTC-only mode */
#define RTC_SECONDS_REG					0x0
#define RTC_PMIC_REG					0x98
#define RTC_PMIC_POWER_EN				BIT(16)
#define RTC_PMIC_EXT_WAKEUP_STS				BIT(12)
#define RTC_PMIC_EXT_WAKEUP_POL				BIT(4)
#define RTC_PMIC_EXT_WAKEUP_EN				BIT(0)

	.arm
	.arch armv7-a
	.arch_extension sec
	.align 3
/*
 * am43xx_do_wfi(unsigned long wfi_flags)
 *
 * Low power entry point.  r0 carries WFI_FLAG_* bits selecting the steps
 * to perform: cache flush, EMIF self-refresh/save/disable, RTC-only
 * power-off, WKUP_M3 handoff, then WFI.  This routine is exported through
 * the am43xx_pm_sram table below, so it is presumably run from a copy in
 * SRAM (TODO confirm against the pm33xx caller).
 *
 * Returns 1 in r0 when execution falls through the WFI (abort due to a
 * late interrupt) and the normal context is still intact.
 *
 * Register roles: r4 = wfi_flags (live across most of the routine),
 * r8 = L2 cache controller virtual base, r9 = EMIF SRAM function table.
 */
ENTRY(am43xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/* Save wfi_flags arg to data space */
	mov	r4, r0
	adr	r3, am43xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	str	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

#ifdef CONFIG_CACHE_L2X0
	/* Retrieve l2 cache virt address BEFORE we shut off EMIF */
	ldr	r1, get_l2cache_base
	blx	r1
	mov	r8, r0			@ r8 = L2 controller virtual base
#endif

	/* Only flush cache is we know we are losing MPU context */
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_flush

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb
	dsb

	/*
	 * Invalidate L1 and L2 data cache.
	 * (Second kernel_flush call: anything allocated between the first
	 * flush and the SCTLR.C clear is pushed out now.)
	 */
	ldr	r1, kernel_flush
	blx	r1

#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 */
#ifdef CONFIG_PL310_ERRATA_727915
	/* Erratum 727915 workaround: set debug ctrl around by-way ops */
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	dsb
	smc	#0
	dsb
#endif
	mov	r0, r8
	adr	r4, am43xx_pm_ro_sram_data
	ldr	r3, [r4, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]

	/* Stash L2 AUX_CTRL and PREFETCH_CTRL so resume can restore them */
	mov	r2, r0
	ldr	r0, [r2, #L2X0_AUX_CTRL]
	str	r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
	ldr	r0, [r2, #L310_PREFETCH_CTRL]
	str	r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]

	/* Clean+invalidate all ways; poll until the way mask clears */
	ldr	r0, l2_val
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, l2_val
	ands	r0, r0, r1
	bne	wait

#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	dsb
	smc	#0
	dsb
#endif
l2x_sync:
	/* Drain the L2 controller: write CACHE_SYNC, poll until idle */
	mov	r0, r8
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

	/* Restore wfi_flags (r4 was reused as a scratch pointer above) */
	adr	r3, am43xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	ldr	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
	/*
	 * If we are trying to enter RTC+DDR mode we must perform
	 * a read from the rtc address space to ensure translation
	 * presence in the TLB to avoid page table walk after DDR
	 * is unavailable.
	 */
	tst	r4, #WFI_FLAG_RTC_ONLY
	beq	skip_rtc_va_refresh

	adr	r3, am43xx_pm_ro_sram_data
	ldr	r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]
	ldr	r0, [r1]		@ dummy read to warm the TLB entry

skip_rtc_va_refresh:
	/* Check if we want self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_enter_sr

	adr	r9, am43xx_emif_sram_table
	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

emif_skip_enter_sr:
	/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SAVE_EMIF
	beq	emif_skip_save

	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

emif_skip_save:
	/* Only can disable EMIF if we have entered self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_disable

	/* Disable EMIF (clear the MODULEMODE field of its CLKCTRL) */
	ldr	r1, am43xx_virt_emif_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

wait_emif_disable:
	/* Poll until IDLEST reports the module fully disabled */
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable

emif_skip_disable:
	tst	r4, #WFI_FLAG_RTC_ONLY
	beq	skip_rtc_only

	/*
	 * RTC-only mode: arm the PMIC power-enable / ext-wakeup bits in
	 * the RTC-SS PMIC register, then wait for the power rail to drop.
	 */
	adr	r3, am43xx_pm_ro_sram_data
	ldr	r1, [r3, #AMX3_PM_RTC_BASE_VIRT_OFFSET]

	ldr	r0, [r1, #RTC_PMIC_REG]
	orr	r0, r0, #RTC_PMIC_POWER_EN
	orr	r0, r0, #RTC_PMIC_EXT_WAKEUP_STS
	orr	r0, r0, #RTC_PMIC_EXT_WAKEUP_EN
	orr	r0, r0, #RTC_PMIC_EXT_WAKEUP_POL
	str	r0, [r1, #RTC_PMIC_REG]
	ldr	r0, [r1, #RTC_PMIC_REG]	@ read back to post the write
	/* Wait for 2 seconds to lose power */
	mov	r3, #2
	ldr	r2, [r1, #RTC_SECONDS_REG]
rtc_loop:
	/* Count two RTC seconds-register ticks; abort path if still alive */
	ldr	r0, [r1, #RTC_SECONDS_REG]
	cmp	r0, r2
	beq	rtc_loop
	mov	r2, r0
	subs	r3, r3, #1
	bne	rtc_loop
	/* Power never dropped: undo and resume normally */
	b	re_enable_emif

skip_rtc_only:

	tst	r4, #WFI_FLAG_WAKE_M3
	beq	wkup_m3_skip

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, am43xx_virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

	/*
	 * Put MPU CLKDM to SW_SLEEP
	 */
	ldr	r1, am43xx_virt_mpu_clkstctrl
	mov	r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_SW_SLEEP
	str	r2, [r1]

wkup_m3_skip:
	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. CPU can specualatively
	 * prefetch the instructions so add NOPs after WFI. Sixteen
	 * NOPs as per Cortex-A9 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here in case of an abort due to a late interrupt */

	/* Return MPU CLKDM to HW_AUTO */
	ldr	r1, am43xx_virt_mpu_clkstctrl
	mov	r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
	str	r2, [r1]

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, am43xx_virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

re_enable_emif:
	/* Re-enable EMIF */
	ldr	r1, am43xx_virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable

	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_restore

	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

cache_skip_restore:
	/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_exit_sr_abt

	/* Take the EMIF back out of self refresh on the abort path */
	adr	r9, am43xx_emif_sram_table
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

emif_skip_exit_sr_abt:
	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am43xx_do_wfi)
	.align
/*
 * am43xx_resume_offset: byte offset of the deep-sleep resume entry from
 * the start of am43xx_do_wfi, used by the PM core to compute the resume
 * address inside the relocated SRAM copy.
 */
ENTRY(am43xx_resume_offset)
	.word . - am43xx_do_wfi

/*
 * am43xx_resume_from_deep_sleep
 *
 * Entered after MPU context was lost: runs with physical addresses
 * (note the *_phys_* literals below), restores EMIF context, exits
 * self refresh, re-enables and re-initializes the L2 cache via secure
 * monitor calls, then jumps to the generic cpu_resume with r0 = 0.
 */
ENTRY(am43xx_resume_from_deep_sleep)
	/* Set MPU CLKSTCTRL to HW AUTO so that CPUidle works properly */
	ldr	r1, am43xx_virt_mpu_clkstctrl
	mov	r2, #AM43XX_CM_CLKSTCTRL_CLKTRCTRL_HW_AUTO
	str	r2, [r1]

	/* For AM43xx, use EMIF power down until context is restored */
	ldr	r2, am43xx_phys_emif_poweroff
	mov	r1, #AM43XX_EMIF_POWEROFF_ENABLE
	str	r1, [r2, #0x0]

	/* Re-enable EMIF */
	ldr	r1, am43xx_phys_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
wait_emif_enable1:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable1

	/* Restore EMIF registers and bring DDR out of self refresh */
	adr	r9, am43xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

	/* DDR is usable again: drop the EMIF power-down override */
	ldr	r2, am43xx_phys_emif_poweroff
	mov	r1, #AM43XX_EMIF_POWEROFF_DISABLE
	str	r1, [r2, #0x0]

	ldr	r1, [r9, #EMIF_PM_RUN_HW_LEVELING]
	blx	r1

#ifdef CONFIG_CACHE_L2X0
	ldr	r2, l2_cache_base
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled

	/* Restore saved PREFETCH_CTRL and AUX_CTRL via secure monitor */
	adr	r4, am43xx_pm_ro_sram_data
	ldr	r3, [r4, #AMX3_PM_RO_SRAM_DATA_PHYS_OFFSET]
	ldr	r0, [r3, #AMX3_PM_L2_PREFETCH_CTRL_VAL_OFFSET]

	ldr	r12, l2_smc1
	dsb
	smc	#0
	dsb
set_aux_ctrl:
	ldr	r0, [r3, #AMX3_PM_L2_AUX_CTRL_VAL_OFFSET]
	ldr	r12, l2_smc2
	dsb
	smc	#0
	dsb

	/* L2 invalidate on resume */
	ldr	r0, l2_val
	ldr	r2, l2_cache_base
	str	r0, [r2, #L2X0_INV_WAY]
wait2:
	ldr	r0, [r2, #L2X0_INV_WAY]
	ldr	r1, l2_val
	ands	r0, r0, r1
	bne	wait2

#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	dsb
	smc	#0
	dsb
#endif
l2x_sync2:
	/* Drain the controller, then enable L2 via the CTRL monitor call */
	ldr	r2, l2_cache_base
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync2:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync2

	mov	r0, #0x1
	ldr	r12, l2_smc3
	dsb
	smc	#0
	dsb
#endif
skip_l2en:
	/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am43xx_resume_from_deep_sleep)
/*
 * Local variables
 *
 * Literal pool referenced with pc-relative ldr from the routines above,
 * so the values travel with the code when it is copied to SRAM.
 */
	.align
kernel_flush:
	.word	v7_flush_dcache_all
ddr_start:
	.word	PAGE_OFFSET

/* Physical PRM EMIF control register (usable while MMU context differs) */
am43xx_phys_emif_poweroff:
	.word	(AM43XX_CM_BASE + AM43XX_PRM_DEVICE_INST + \
		 AM43XX_PRM_EMIF_CTRL_OFFSET)
/* Virtual CM register addresses, valid only while the kernel mapping is up */
am43xx_virt_mpu_clkstctrl:
	.word	(AM43XX_CM_MPU_CLKSTCTRL)
am43xx_virt_mpu_clkctrl:
	.word	(AM43XX_CM_MPU_MPU_CLKCTRL)
am43xx_virt_emif_clkctrl:
	.word	(AM43XX_CM_PER_EMIF_CLKCTRL)
am43xx_phys_emif_clkctrl:
	.word	(AM43XX_CM_BASE + AM43XX_CM_PER_INST + \
		 AM43XX_CM_PER_EMIF_CLKCTRL_OFFSET)

#ifdef CONFIG_CACHE_L2X0
/* L2 cache related defines for AM437x */
get_l2cache_base:
	.word	omap4_get_l2cache_base
l2_cache_base:
	.word	OMAP44XX_L2CACHE_BASE
/* Secure monitor service indices for L2 prefetch/aux/ctrl writes */
l2_smc1:
	.word	OMAP4_MON_L2X0_PREFETCH_INDEX
l2_smc2:
	.word	OMAP4_MON_L2X0_AUXCTRL_INDEX
l2_smc3:
	.word	OMAP4_MON_L2X0_CTRL_INDEX
l2_val:
	.word	0xffff			@ 16-way mask for by-way L2 operations
#endif

.align 3
/* DDR related defines */
/* Table of EMIF PM function pointers, filled in by the ti-emif-sram code */
ENTRY(am43xx_emif_sram_table)
	.space EMIF_PM_FUNCTIONS_SIZE

/* Descriptor consumed by the pm33xx core to relocate this code to SRAM */
ENTRY(am43xx_pm_sram)
	.word am43xx_do_wfi
	.word am43xx_do_wfi_sz
	.word am43xx_resume_offset
	.word am43xx_emif_sram_table
	.word am43xx_pm_ro_sram_data

resume_addr:
	.word cpu_resume - PAGE_OFFSET + 0x80000000

.align 3
/* Scratch/data area shared with the PM core (offsets from pm-asm-offsets.h) */
ENTRY(am43xx_pm_ro_sram_data)
	.space AMX3_PM_RO_SRAM_DATA_SIZE

/* Total size of the relocatable am43xx_do_wfi blob */
ENTRY(am43xx_do_wfi_sz)
	.word . - am43xx_do_wfi