/*
 * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/cache.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>

#include "flowctrl.h"
#include "fuse.h"
#include "iomap.h"
#include "reset.h"
#include "sleep.h"

#define PMC_SCRATCH41	0x140

#define RESET_DATA(x)	((TEGRA_RESET_##x)*4)
#ifdef CONFIG_PM_SLEEP
/*
 *	tegra_resume
 *
 *	  CPU boot vector when restarting a CPU following
 *	  an LP2 transition. Also branched to by LP0 and LP1 resume after
 *	  re-enabling sdram.
 *
 *	r6: SoC ID
 *	r8: CPU part number
 */
ENTRY(tegra_resume)
	/* Cortex-A9 needs its L1 invalidated by software after reset. */
	check_cpu_part_num 0xc09, r8, r9
	bleq	v7_invalidate_l1

	cpu_id	r0
	tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
	cmp	r6, #TEGRA114		@ Tegra114: CPU0 may resume here too
	beq	no_cpu0_chk

	cmp	r0, #0			@ CPU0?
 THUMB(	it	ne )
	bne	cpu_resume		@ no
no_cpu0_chk:

	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	beq	1f			@ Yes
	/* Clear the flow controller flags for this CPU. */
	cpu_to_csr_reg	r1, r0		@ r1 = CSR register offset for this CPU
	mov32	r2, TEGRA_FLOW_CTRL_BASE
	ldr	r1, [r2, r1]
	/* Clear event & intr flag */
	orr	r1, r1, \
		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	movw	r0, #0x3FFD	@ enable, cluster_switch, immed, bitmaps
				@ & ext flags for CPU power mgnt
	bic	r1, r1, r0
	str	r1, [r2]	@ NOTE(review): writes base+0; the CSR offset in
				@ r1 was overwritten by the ldr above — confirm
				@ this is the intended write-back address
1:

	mov32	r9, 0xc09	@ Cortex-A9 primary part number
	cmp	r8, r9
	bne	not_ca9
#ifdef CONFIG_HAVE_ARM_SCU
	/* enable SCU */
	mov32	r0, TEGRA_ARM_PERIF_BASE
	ldr	r1, [r0]
	orr	r1, r1, #1
	str	r1, [r0]
#endif

	/* L2 cache resume & re-enable */
	l2_cache_resume	r0, r1, r2, l2x0_saved_regs_addr
not_ca9:

	b	cpu_resume
ENDPROC(tegra_resume)
#endif
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Physical address of the l2x0 saved-register area, filled in by the
	 * kernel before suspend and consumed by l2_cache_resume above.
	 */
	.globl	l2x0_saved_regs_addr
l2x0_saved_regs_addr:
	.long	0
#endif
.align L1_CACHE_SHIFT
ENTRY( _ _ t e g r a _ c p u _ r e s e t _ h a n d l e r _ s t a r t )
/ *
* __tegra_cpu_reset_handler :
*
* Common h a n d l e r f o r a l l C P U r e s e t e v e n t s .
*
* Register u s a g e w i t h i n t h e r e s e t h a n d l e r :
*
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
* Others : scratch
2013-05-20 14:39:24 +04:00
* R6 = S o C I D
2013-01-04 13:32:22 +04:00
* R7 = C P U p r e s e n t ( t o t h e O S ) m a s k
* R8 = C P U i n L P 1 s t a t e m a s k
* R9 = C P U i n L P 2 s t a t e m a s k
* R1 0 = C P U n u m b e r
* R1 1 = C P U m a s k
* R1 2 = p o i n t e r t o r e s e t h a n d l e r d a t a
*
* NOTE : This c o d e i s c o p i e d t o I R A M . A l l c o d e a n d d a t a a c c e s s e s
* must b e p o s i t i o n - i n d e p e n d e n t .
* /
.align L1_CACHE_SHIFT
ENTRY( _ _ t e g r a _ c p u _ r e s e t _ h a n d l e r )
cpsid a i f , 0 x13 @ SVC mode, interrupts disabled
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
2013-05-20 14:39:24 +04:00
tegra_ g e t _ s o c _ i d T E G R A _ A P B _ M I S C _ B A S E , r6
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
# ifdef C O N F I G _ A R C H _ T E G R A _ 2 x _ S O C
t20_check :
2013-05-20 14:39:24 +04:00
cmp r6 , #T E G R A 20
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
bne a f t e r _ t 2 0 _ c h e c k
t20_errata :
# Tegra2 0 i s a C o r t e x - A 9 r1 p1
mrc p15 , 0 , r0 , c1 , c0 , 0 @ read system control register
orr r0 , r0 , #1 < < 1 4 @ erratum 716044
mcr p15 , 0 , r0 , c1 , c0 , 0 @ write system control register
mrc p15 , 0 , r0 , c15 , c0 , 1 @ read diagnostic register
orr r0 , r0 , #1 < < 4 @ erratum 742230
orr r0 , r0 , #1 < < 1 1 @ erratum 751472
mcr p15 , 0 , r0 , c15 , c0 , 1 @ write diagnostic register
b a f t e r _ e r r a t a
after_t20_check :
# endif
# ifdef C O N F I G _ A R C H _ T E G R A _ 3 x _ S O C
t30_check :
2013-05-20 14:39:24 +04:00
cmp r6 , #T E G R A 30
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
bne a f t e r _ t 3 0 _ c h e c k
t30_errata :
# Tegra3 0 i s a C o r t e x - A 9 r2 p9
mrc p15 , 0 , r0 , c15 , c0 , 1 @ read diagnostic register
orr r0 , r0 , #1 < < 6 @ erratum 743622
orr r0 , r0 , #1 < < 1 1 @ erratum 751472
mcr p15 , 0 , r0 , c15 , c0 , 1 @ write diagnostic register
b a f t e r _ e r r a t a
after_t30_check :
# endif
after_errata :
2013-01-04 13:32:22 +04:00
mrc p15 , 0 , r10 , c0 , c0 , 5 @ MPIDR
and r10 , r10 , #0x3 @ R10 = CPU number
mov r11 , #1
mov r11 , r11 , l s l r10 @ R11 = CPU mask
adr r12 , _ _ t e g r a _ c p u _ r e s e t _ h a n d l e r _ d a t a
# ifdef C O N F I G _ S M P
/* Does the OS know about this CPU? */
ldr r7 , [ r12 , #R E S E T _ D A T A ( M A S K _ P R E S E N T ) ]
tst r7 , r11 @ if !present
bleq _ _ d i e @ CPU not present (to OS)
# endif
# ifdef C O N F I G _ A R C H _ T E G R A _ 2 x _ S O C
/* Are we on Tegra20? */
2013-05-20 14:39:24 +04:00
cmp r6 , #T E G R A 20
2013-01-04 13:32:22 +04:00
bne 1 f
/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
mov3 2 r5 , T E G R A _ P M C _ B A S E
2013-01-04 13:32:22 +04:00
mov r0 , #0
cmp r10 , #0
ARM: tegra: add CPU errata WARs to Tegra reset handler
The CPU cores in Tegra contain some errata. Workarounds must be applied
for these every time a CPU boots. Implement those workarounds directly
in the Tegra-specific CPU reset vector.
Many of these workarounds duplicate code in the core ARM kernel.
However, the core ARM kernel cannot enable those workarounds when
building a multi-platform kernel, since they require writing to secure-
only registers, and a multi-platform kernel often does not run in secure
mode, and also cannot generically/architecturally detect whether it is
running in secure mode, and hence cannot either unconditionally or
conditionally apply these workarounds.
Instead, the workarounds must be applied in architecture-specific reset
code, which is able to have more direct knowledge of the secure/normal
state. On Tegra, we will be able to detect this using a non-architected
register in the future, although we currently assume the kernel runs only
in secure mode. Other SoCs may never run the kernel in secure mode, and
hence always rely on a secure monitor to enable the workarounds, and
hence never implement them in the kernel.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2013-03-05 04:05:56 +04:00
strne r0 , [ r5 , #P M C _ S C R A T C H 41 ]
2013-01-04 13:32:22 +04:00
1 :
# endif
/* Waking up from LP2? */
ldr r9 , [ r12 , #R E S E T _ D A T A ( M A S K _ L P 2 ) ]
tst r9 , r11 @ if in_lp2
beq _ _ i s _ n o t _ l p2
ldr l r , [ r12 , #R E S E T _ D A T A ( S T A R T U P _ L P 2 ) ]
cmp l r , #0
bleq _ _ d i e @ no LP2 startup handler
bx l r
__is_not_lp2 :
# ifdef C O N F I G _ S M P
/ *
2013-05-20 14:39:29 +04:00
* Can o n l y b e s e c o n d a r y b o o t ( i n i t i a l o r h o t p l u g )
* CPU0 c a n ' t b e h e r e f o r T e g r a20 / 3 0
2013-01-04 13:32:22 +04:00
* /
2013-05-20 14:39:29 +04:00
cmp r6 , #T E G R A 114
beq _ _ n o _ c p u 0 _ c h k
2013-01-04 13:32:22 +04:00
cmp r10 , #0
bleq _ _ d i e @ CPU0 cannot be here
2013-05-20 14:39:29 +04:00
__no_cpu0_chk :
2013-01-04 13:32:22 +04:00
ldr l r , [ r12 , #R E S E T _ D A T A ( S T A R T U P _ S E C O N D A R Y ) ]
cmp l r , #0
bleq _ _ d i e @ no secondary startup handler
bx l r
# endif
/ *
* We d o n ' t k n o w w h y t h e C P U r e s e t . J u s t k i l l i t .
* The L R r e g i s t e r w i l l c o n t a i n t h e a d d r e s s w e d i e d a t + 4 .
* /
__die :
sub l r , l r , #4
mov3 2 r7 , T E G R A _ P M C _ B A S E
str l r , [ r7 , #P M C _ S C R A T C H 41 ]
mov3 2 r7 , T E G R A _ C L K _ R E S E T _ B A S E
/* Are we on Tegra20? */
2013-05-20 14:39:24 +04:00
cmp r6 , #T E G R A 20
2013-01-04 13:32:22 +04:00
bne 1 f
# ifdef C O N F I G _ A R C H _ T E G R A _ 2 x _ S O C
mov3 2 r0 , 0 x11 1 1
mov r1 , r0 , l s l r10
str r1 , [ r7 , #0x340 ] @ CLK_RST_CPU_CMPLX_SET
# endif
1 :
# ifdef C O N F I G _ A R C H _ T E G R A _ 3 x _ S O C
mov3 2 r6 , T E G R A _ F L O W _ C T R L _ B A S E
cmp r10 , #0
moveq r1 , #F L O W _ C T R L _ H A L T _ C P U 0 _ E V E N T S
moveq r2 , #F L O W _ C T R L _ C P U 0 _ C S R
movne r1 , r10 , l s l #3
addne r2 , r1 , #( F L O W _ C T R L _ C P U 1 _ C S R - 8 )
addne r1 , r1 , #( F L O W _ C T R L _ H A L T _ C P U 1 _ E V E N T S - 8 )
/ * Clear C P U " e v e n t " a n d " i n t e r r u p t " f l a g s a n d p o w e r g a t e
it w h e n h a l t i n g b u t n o t b e f o r e i t i s i n t h e " W F I " s t a t e . * /
ldr r0 , [ r6 , + r2 ]
orr r0 , r0 , #F L O W _ C T R L _ C S R _ I N T R _ F L A G | F L O W _ C T R L _ C S R _ E V E N T _ F L A G
orr r0 , r0 , #F L O W _ C T R L _ C S R _ E N A B L E
str r0 , [ r6 , + r2 ]
/* Unconditionally halt this CPU */
mov r0 , #F L O W _ C T R L _ W A I T E V E N T
str r0 , [ r6 , + r1 ]
ldr r0 , [ r6 , + r1 ] @ memory barrier
dsb
isb
wfi @ CPU should be power gated here
/* If the CPU didn't power gate above just kill it's clock. */
mov r0 , r11 , l s l #8
str r0 , [ r7 , #348 ] @ CLK_CPU_CMPLX_SET
# endif
/* If the CPU still isn't dead, just spin here. */
b .
ENDPROC( _ _ t e g r a _ c p u _ r e s e t _ h a n d l e r )
.align L1_CACHE_SHIFT
.type _ _ tegra_ c p u _ r e s e t _ h a n d l e r _ d a t a , % o b j e c t
.globl __tegra_cpu_reset_handler_data
__tegra_cpu_reset_handler_data :
.rept TEGRA_RESET_DATA_SIZE
.long 0
.endr
.align L1_CACHE_SHIFT
ENTRY( _ _ t e g r a _ c p u _ r e s e t _ h a n d l e r _ e n d )