2019-06-04 10:11:33 +02:00
/* SPDX-License-Identifier: GPL-2.0-only */
2006-03-27 14:58:25 +01:00
/ *
* linux/ a r c h / a r m / k e r n e l / h e a d - n o m m u . S
*
* Copyright ( C ) 1 9 9 4 - 2 0 0 2 R u s s e l l K i n g
* Copyright ( C ) 2 0 0 3 - 2 0 0 6 H y o k S . C h o i
*
* Common k e r n e l s t a r t u p c o d e ( n o n - p a g e d M M )
* /
# include < l i n u x / l i n k a g e . h >
# include < l i n u x / i n i t . h >
2017-10-16 12:54:05 +01:00
# include < l i n u x / e r r n o . h >
2006-03-27 14:58:25 +01:00
# include < a s m / a s s e m b l e r . h >
# include < a s m / p t r a c e . h >
2006-05-05 15:11:14 +01:00
# include < a s m / a s m - o f f s e t s . h >
ARM: mm: Make virt_to_pfn() a static inline
Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of a passing a pointer of that
type to the function explicit and exposes any misuse of the
macro virt_to_pfn() acting polymorphic and accepting many types
such as (void *), (unitptr_t) or (unsigned long) as arguments
without warnings.
Doing this is a bit intrusive: virt_to_pfn() requires
PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and this is defined in
<asm/page.h>, so this must be included *before* <asm/memory.h>.
The use of macros were obscuring the unclear inclusion order here,
as the macros would eventually be resolved, but a static inline
like this cannot be compiled with unresolved macros.
The naive solution to include <asm/page.h> at the top of
<asm/memory.h> does not work, because <asm/memory.h> sometimes
includes <asm/page.h> at the end of itself, which would create a
confusing inclusion loop. So instead, take the approach to always
unconditionally include <asm/page.h> at the end of <asm/memory.h>
arch/arm uses <asm/memory.h> explicitly in a lot of places,
however it turns out that if we just unconditionally include
<asm/memory.h> into <asm/page.h> and switch all inclusions of
<asm/memory.h> to <asm/page.h> instead, we enforce the right
order and <asm/memory.h> will always have access to the
definitions.
Put an inclusion guard in place making it impossible to include
<asm/memory.h> explicitly.
Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
2022-06-02 10:18:32 +02:00
# include < a s m / p a g e . h >
2012-03-28 18:30:01 +01:00
# include < a s m / c p15 . h >
2006-04-24 09:45:35 +01:00
# include < a s m / t h r e a d _ i n f o . h >
2010-05-21 18:06:41 +01:00
# include < a s m / v7 m . h >
2013-02-22 17:48:56 +00:00
# include < a s m / m p u . h >
2006-03-27 14:58:25 +01:00
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr.
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 */

	__HEAD

#ifdef CONFIG_CPU_THUMBONLY
	.thumb
ENTRY(stext)
#else
	.arm
ENTRY(stext)

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install	@ if entered in HYP mode, install the
					@ hyp stub and drop to SVC
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9
						@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
	mrc	p15, 0, r9, c0, c0		@ get processor id
#elif defined(CONFIG_CPU_V7M)
	ldr	r9, =BASEADDR_V7M_SCB		@ v7-M has no CP15: read the
	ldr	r9, [r9, V7M_SCB_CPUID]		@ CPUID out of the SCB instead
#else
	ldr	r9, =CONFIG_PROCESSOR_ID	@ cannot probe: use configured ID
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_MPU
	bl	__setup_mpu
#endif

	@ Call the per-CPU-type init function found via the procinfo entry.
	@ PROCINFO_INITFUNC is stored as an offset relative to the procinfo
	@ record (r10), hence the add before the indirect call.
	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	ldr	lr, =__mmap_switched		@ __after_proc_init returns via lr
	b	__after_proc_init
ENDPROC(stext)
2006-03-27 14:58:25 +01:00
2012-02-28 11:50:32 +00:00
#ifdef CONFIG_SMP
	.text
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r9

#ifndef CONFIG_CPU_CP15
	ldr	r9, =CONFIG_PROCESSOR_ID	@ cannot probe: use configured ID
#else
	mrc	p15, 0, r9, c0, c0		@ get processor id
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor?
	beq	__error_p			@ yes, error 'p'

	ldr	r7, __secondary_data		@ r7 = &secondary_data
#ifdef CONFIG_ARM_MPU
	bl      __secondary_setup_mpu		@ Initialize the MPU
#endif

	@ Per-CPU-type init via the procinfo entry; PROCINFO_INITFUNC is a
	@ relative offset, hence the add against r10 before the call.
	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	bl	__after_proc_init
	ldr	r7, __secondary_data		@ reload r7
	ldr	sp, [r7, #12]			@ set up the stack pointer
	ldr	r0, [r7, #16]			@ set up task pointer
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(secondary_startup)

	.type	__secondary_data, %object
__secondary_data:
	.long	secondary_data
#endif /* CONFIG_SMP */
2006-03-27 14:58:25 +01:00
/*
 * Set the Control Register and Read the process ID.
 *
 * Entered from stext/secondary_startup after the per-CPU-type init
 * function has run; r0 holds the CP15 control register value returned
 * by that init function (see comment below).  Returns via lr.
 */
	.text
__after_proc_init:
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
#ifdef CONFIG_ARM_MPU
	@ Re-probe the PMSA version; the eq condition used by the
	@ conditional instructions below means "a supported PMSA (v7 or v8)
	@ was detected".
M_CLASS(ldr	r3, [r12, 0x50])
AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)          @ Read ID_MMFR0
	and	r3, r3, #(MMFR0_PMSA)           @ PMSA field
	teq	r3, #(MMFR0_PMSAv7)             @ PMSA v7
	beq	1f
	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
	/*
	 * Memory region attributes for PMSAv8:
	 *
	 *   n = AttrIndx[2:0]
	 *                      n       MAIR
	 *   DEVICE_nGnRnE      000     00000000
	 *   NORMAL             001     11111111
	 */
	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
	moveq	r3, #0
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])

1:
#endif
#ifdef CONFIG_CPU_CP15
	/*
	 * CP15 system control register value returned in r0 from
	 * the CPU init function.
	 */

#ifdef CONFIG_ARM_MPU
	@ eq here still reflects the PMSA probe above: only enable the MPU
	@ when a supported PMSA was found.
	biceq	r0, r0, #CR_BR			@ Disable the 'default mem-map'
	orreq	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
	@ v7-M: enable the MPU via the memory-mapped MPU_CTRL register
	@ (still conditional on the PMSA probe above).
	ldreq	r3, [r12, MPU_CTRL]
	biceq	r3, #MPU_CTRL_PRIVDEFENA
	orreq	r3, #MPU_CTRL_ENABLE
	streq	r3, [r12, MPU_CTRL]
	isb
#endif
	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_IC
#endif
	str	r0, [r12, V7M_SCB_CCR]
	/* Pass exc_ret to __mmap_switched */
	mov	r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
	ret	lr
ENDPROC(__after_proc_init)
	.ltorg
2006-03-27 14:58:25 +01:00
2013-02-22 17:48:56 +00:00
#ifdef CONFIG_ARM_MPU
/*
 * Two variants of the MPU-programming helper macros: the AR-class
 * (CP15, A/R profile) versions and the M-class (memory-mapped SCB
 * registers, base pointer in \base) versions.  Callers pass all
 * arguments in both builds; the variant that does not need one takes
 * it as "unused".
 */
#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
.macro set_region_nr tmp, rgnr, unused
	mov	\tmp, \rgnr			@ Use static region numbers
	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
.endm

/* Setup a single MPU region, either D or I side (D-side for unified) */
.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
.endm
#else
/* v7-M: select the region via the memory-mapped RNR register */
.macro set_region_nr tmp, rgnr, base
	mov	\tmp, \rgnr
	str     \tmp, [\base, #PMSAv7_RNR]
.endm

/* v7-M: RASR combines the AR-class RACR (upper half) and RSR (lower half) */
.macro setup_region bar, acr, sr, unused, base
	lsl     \acr, \acr, #16
	orr     \acr, \acr, \sr
	str     \bar, [\base, #PMSAv7_RBAR]
	str     \acr, [\base, #PMSAv7_RASR]
.endm

#endif
2013-02-22 17:48:56 +00:00
/*
 * Setup the MPU and initial MPU Regions. We create the following regions:
 * Region 0: Use this for probing the MPU details, so leave disabled.
 * Region 1: Background region - covers the whole of RAM as strongly ordered
 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
 *
 * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/
	__HEAD

ENTRY(__setup_mpu)
	/* Probe for v7 PMSA compliance */
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)

AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)		@ Read ID_MMFR0
M_CLASS(ldr	r0, [r12, 0x50])
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__setup_pmsa_v8

	ret	lr				@ no supported PMSA: do nothing
ENDPROC(__setup_mpu)

ENTRY(__setup_pmsa_v7)
	/* Calculate the size of a region covering just the kernel */
	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
	ldr     r6, =(_end)			@ Cover whole kernel
	sub	r6, r6, r5			@ Minimum size of region to map
	clz	r6, r6				@ Region size must be 2^N...
	rsb	r6, r6, #31			@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit

	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
AR_CLASS(mrc	p15, 0, r0, c0, c0, 4)		@ MPUIR
M_CLASS(ldr	r0, [r12, #MPU_TYPE])
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	bxeq	lr
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified

	/* Setup second region first to free up r6 */
	set_region_nr r0, #PMSAv7_RAM_REGION, r12
	isb
	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
	ldr	r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
	beq	1f					@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
1:	isb

	/* First/background region */
	set_region_nr r0, #PMSAv7_BG_REGION, r12
	isb
	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
	mov	r0, #0				@ BG region starts at 0x0
	ldr	r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
	mov	r6, #PMSAv7_RSR_ALL_MEM		@ 4GB region, enabled

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ 0x0, BG region, enabled
	beq	2f					@ Memory-map not unified
	/* Fixed: comma was missing between the side and base arguments,
	 * which would have passed "PMSAv7_INSTR_SIDE r12" as one malformed
	 * macro argument (compare every sibling invocation). */
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ 0x0, BG region, enabled
2:	isb

#ifdef CONFIG_XIP_KERNEL
	set_region_nr r0, #PMSAv7_ROM_REGION, r12
	isb

	ldr	r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)

	ldr	r0, =CONFIG_XIP_PHYS_ADDR	@ ROM start
	ldr     r6, =(_exiprom)			@ ROM end
	sub	r6, r6, r0			@ Minimum size of region to map
	clz	r6, r6				@ Region size must be 2^N...
	rsb	r6, r6, #31			@ ...so round up region size
	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
	beq	3f					@ Memory-map not unified
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
3:	isb
#endif
	ret	lr
ENDPROC(__setup_pmsa_v7)
2017-10-16 12:54:05 +01:00
2018-04-03 10:39:23 +01:00
/*
 * Program the PMSAv8 MPU regions for the primary CPU:
 *   region 0 (XIP only): the ROM image, read-only normal memory
 *   region 1:            the kernel image, RW normal memory
 *   regions 2/3 (and 4 on XIP): device-type XN background covering the
 *                        address space around the kernel/ROM
 * AR-class uses CP15 PRBAR/PRLAR; M-class uses the memory-mapped
 * aliases at r12 (SCB base, set up by __setup_mpu).  Returns via lr.
 */
ENTRY(__setup_pmsa_v8)
	mov	r0, #0
AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
M_CLASS(str	r0, [r12, #PMSAv8_RNR])
	isb

#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =CONFIG_XIP_PHYS_ADDR	@ ROM start
	ldr     r6, =(_exiprom)			@ ROM end
	sub	r6, r6, #1			@ PRLAR holds the last byte addr
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)		@ PRBAR0
AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)		@ PRLAR0
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
#endif

	ldr	r5, =KERNEL_START
	ldr	r6, =KERNEL_END
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)		@ PRBAR1
AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)		@ PRLAR1
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])

	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
	ldr	r6, =KERNEL_START
	ldr	r5, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r5
	movcs	r6, r5				@ r6 = min(KERNEL_START, XIP addr)
#else
	ldr	r6, =KERNEL_START
#endif
	cmp	r6, #0				@ nothing below the image?
	beq	1f				@ then skip this BG region

	mov	r5, #0
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)		@ PRBAR2
AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)		@ PRLAR2
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])

1:
	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =KERNEL_END
	ldr	r6, =(_exiprom)
	cmp	r5, r6
	movcc	r5, r6				@ r5 = max(KERNEL_END, _exiprom)
#else
	ldr	r5, =KERNEL_END
#endif
	mov	r6, #0xffffffff
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)		@ PRBAR3
AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)		@ PRLAR3
M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])

#ifdef CONFIG_XIP_KERNEL
	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
	ldr	r5, =(_exiprom)
	ldr	r6, =KERNEL_END
	cmp	r5, r6
	movcs	r5, r6				@ r5 = min(_exiprom, KERNEL_END)

	ldr	r6, =KERNEL_START
	ldr	r0, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r0
	movcc	r6, r0				@ r6 = max(KERNEL_START, XIP addr)

	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

#ifdef CONFIG_CPU_V7M
	/* There is no alias for n == 4 */
	mov	r0, #4
	str	r0, [r12, #PMSAv8_RNR]		@ PRSEL
	isb

	str	r5, [r12, #PMSAv8_RBAR_A(0)]
	str	r6, [r12, #PMSAv8_RLAR_A(0)]
#else
	mcr	p15, 0, r5, c6, c10, 0		@ PRBAR4
	mcr	p15, 0, r6, c6, c10, 1		@ PRLAR4
#endif
#endif
	ret	lr
ENDPROC(__setup_pmsa_v8)
2017-10-16 12:54:05 +01:00
#ifdef CONFIG_SMP
/*
 * Secondary-CPU MPU setup: dispatch to the PMSAv7 or PMSAv8 routine
 * based on the probed PMSA version.  Secondaries always have CP15
 * (no M-class SMP), so the probe uses mrc directly.
 *
 * r6: pointer at mpu_rgn_info
 */
	.text
ENTRY(__secondary_setup_mpu)
	/* Use MPU region info supplied by __cpu_up */
	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info

	/* Probe for v7 PMSA compliance */
	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__secondary_setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__secondary_setup_pmsa_v8
	b	__error_p			@ unknown PMSA: fatal
ENDPROC(__secondary_setup_mpu)
2017-10-16 12:54:05 +01:00
2018-04-03 10:36:37 +01:00
/*
 * Copy the PMSAv7 region settings prepared by the primary CPU from
 * mpu_rgn_info into this secondary's MPU, iterating over the used
 * regions from last to first.
 *
 * r6: pointer at mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v7)
	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	beq	__error_p

	ldr	r4, [r6, #MPU_RNG_INFO_USED]	@ r4 = number of used regions
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last region

1:
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to region r4-1
	sub	r4, r4, #1
	set_region_nr r0, r4
	isb

	@ Program D-side (and I-side when the map is not unified) from the
	@ saved bar/drsr/dracr values.
	ldr	r0, [r3, #MPU_RGN_DRBAR]
	ldr	r6, [r3, #MPU_RGN_DRSR]
	ldr	r5, [r3, #MPU_RGN_DRACR]

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
	beq	2f
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2:	isb

	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
	cmp	r4, #0
	bgt	1b				@ more regions to program

	ret	lr
ENDPROC(__secondary_setup_pmsa_v7)
2017-10-16 12:54:05 +01:00
2018-04-03 10:39:23 +01:00
/*
 * Copy the PMSAv8 region settings prepared by the primary CPU from
 * mpu_rgn_info into this secondary's MPU, last region first.
 *
 * r6: pointer at mpu_rgn_info
 */
ENTRY(__secondary_setup_pmsa_v8)
	ldr	r4, [r6, #MPU_RNG_INFO_USED]	@ r4 = number of used regions
#ifndef CONFIG_XIP_KERNEL
	add	r4, r4, #1			@ non-XIP stores one extra region
#endif
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last region

1:
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to region r4-1
	sub	r4, r4, #1

	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
	isb

	ldr	r5, [r3, #MPU_RGN_PRBAR]
	ldr	r6, [r3, #MPU_RGN_PRLAR]

	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR

	cmp	r4, #0
	bgt	1b				@ more regions to program

	ret	lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
2006-03-27 14:58:25 +01:00
# include " h e a d - c o m m o n . S "