/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
/* <asm/page.h> must come before any user of virt_to_pfn()/PHYS_PFN_OFFSET */
#include <asm/page.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
2005-04-16 15:20:36 -07:00
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ENTRY_ORDER	3	/* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ENTRY_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2021-06-03 09:51:21 +01:00
/*
 * This needs to be assigned at runtime when the linker symbols are
 * resolved. These are unsigned 64bit really, but in this assembly code
 * we store them as 32bit.
 */
	.pushsection .data
	.align	2
	.globl	kernel_sec_start
	.globl	kernel_sec_end
kernel_sec_start:
	.long	0
	.long	0
kernel_sec_end:
	.long	0
	.long	0
	.popsection
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
.macro pgtbl, r d , p h y s
2014-01-21 16:25:34 +01:00
add \ r d , \ p h y s , #T E X T _ O F F S E T
sub \ r d , \ r d , #P G _ D I R _ S I Z E
2005-04-16 15:20:36 -07:00
.endm
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
2005-04-16 15:20:36 -07:00
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ENTRY_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * The main matter: map in the kernel using section mappings, and
	 * set two variables to indicate the physical start and end of the
	 * kernel.
	 */
	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	ldr	r6, =(_end - 1)
	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
#else
	str	r8, [r5]			@ Save physical start of kernel (LE)
#endif
	orr	r3, r8, r7			@ Add the MMU flags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b
	eor	r3, r3, r7			@ Remove the MMU flags
	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
#else
	str	r3, [r5]			@ Save physical end of kernel (LE)
#endif

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ENTRY_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ENTRY_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	cmp	r2, #0
	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	addne	r3, r3, r4
	orrne	r6, r7, r0, lsl #SECTION_SHIFT
	strne	r6, [r3], #1 << PMD_ENTRY_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ENTRY_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
2010-10-04 17:56:13 +01:00
# if d e f i n e d ( C O N F I G _ S M P )
2013-07-31 11:37:17 +01:00
.text
2015-01-31 00:25:30 +01:00
.arm
2015-05-18 09:04:31 +01:00
ENTRY( s e c o n d a r y _ s t a r t u p _ a r m )
2015-04-21 14:17:25 +01:00
THUMB( b a d r r9 , 1 f ) @ Kernel is entered in ARM.
2015-01-31 00:25:30 +01:00
THUMB( b x r9 ) @ If this is a Thumb-2 kernel,
THUMB( . t h u m b ) @ switch to Thumb now.
THUMB( 1 : )
2010-10-04 17:56:13 +01:00
@ Secondary-CPU entry point, entered with the MMU off.  Looks up the
@ proc_info entry for this CPU, loads the page tables published in
@ secondary_data, and tail-calls the CPU-type init function with
@ lr = __enable_mmu so that the MMU is enabled on return.
@ NOTE(review): the bare "YYYY-MM-DD hh:mm:ss" lines in this listing are
@ git commit dates interleaved by the extraction tool, not source code.
ENTRY( s e c o n d a r y _ s t a r t u p )
/ *
* Common e n t r y p o i n t f o r s e c o n d a r y C P U s .
*
* Ensure t h a t w e ' r e i n S V C m o d e , a n d I R Q s a r e d i s a b l e d . L o o k u p
* the p r o c e s s o r t y p e - t h e r e i s n o n e e d t o c h e c k t h e m a c h i n e t y p e
* as i t h a s a l r e a d y b e e n v a l i d a t e d b y t h e p r i m a r y p r o c e s s o r .
* /
2013-02-01 09:40:42 +00:00
ARM_ B E 8 ( s e t e n d b e ) @ ensure we are in BE8 mode
2012-02-09 08:47:17 -08:00
# ifdef C O N F I G _ A R M _ V I R T _ E X T
2013-01-04 17:44:14 +00:00
@ Install the HYP stub on this secondary before dropping to SVC mode.
bl _ _ h y p _ s t u b _ i n s t a l l _ s e c o n d a r y
2012-02-09 08:47:17 -08:00
# endif
@ Force SVC mode with interrupts masked (assembler.h macro; r9 scratch).
safe_ s v c m o d e _ m a s k a l l r9
2010-10-04 17:56:13 +01:00
@ r9 = MIDR; __lookup_processor_type matches it against proc_info and
@ returns the entry in r5 (0 if unknown).
mrc p15 , 0 , r9 , c0 , c0 @ get processor id
bl _ _ l o o k u p _ p r o c e s s o r _ t y p e
movs r10 , r5 @ invalid processor?
moveq r0 , #' p ' @ yes, error 'p'
2010-11-29 19:43:28 +01:00
THUMB( i t e q ) @ force fixup-able long branch encoding
2010-10-04 17:56:13 +01:00
beq _ _ e r r o r _ p
/ *
* Use t h e p a g e t a b l e s s u p p l i e d f r o m _ _ c p u _ u p .
* /
2020-09-14 11:25:23 +03:00
@ r3 = &secondary_data; r12 = virtual address to continue at once the
@ MMU is on (__secondary_switched).
adr_ l r3 , s e c o n d a r y _ d a t a
mov_ l r12 , _ _ s e c o n d a r y _ s w i t c h e d
2018-11-09 04:26:39 +01:00
ldrd r4 , r5 , [ r3 , #0 ] @ get secondary_data.pgdir
2015-08-06 15:07:04 +01:00
ARM_ B E 8 ( e o r r4 , r4 , r5 ) @ Swap r5 and r4 in BE:
ARM_ B E 8 ( e o r r5 , r4 , r5 ) @ it can be done in 3 steps
ARM_ B E 8 ( e o r r4 , r4 , r5 ) @ without using a temp reg.
2015-04-04 20:09:46 +01:00
ldr r8 , [ r3 , #8 ] @ get secondary_data.swapper_pg_dir
2015-04-21 14:17:25 +01:00
badr l r , _ _ e n a b l e _ m m u @ return address
2010-10-04 17:56:13 +01:00
mov r13 , r12 @ __secondary_switched address
2015-03-18 07:29:32 +01:00
@ Tail-call the CPU-specific init routine (PROCINFO_INITFUNC is an
@ offset inside the proc_info entry r10 points at); it returns to
@ __enable_mmu via lr with r0 = CP15 control register value.
ldr r12 , [ r10 , #P R O C I N F O _ I N I T F U N C ]
add r12 , r12 , r10 @ initialise processor
@ (return control reg)
ret r12
2010-10-04 17:56:13 +01:00
ENDPROC( s e c o n d a r y _ s t a r t u p )
2015-01-31 00:25:30 +01:00
ENDPROC( s e c o n d a r y _ s t a r t u p _ a r m )
2010-10-04 17:56:13 +01:00
@ Runs on the secondary CPU immediately after the MMU is enabled: pick
@ up the stack and task pointers published in secondary_data, then
@ branch into C (secondary_start_kernel).
ENTRY( _ _ s e c o n d a r y _ s w i t c h e d )
2022-01-24 19:51:58 +01:00
# if d e f i n e d ( C O N F I G _ V M A P _ S T A C K ) & & ! d e f i n e d ( C O N F I G _ A R M _ L P A E )
@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
@ as the ID map does not cover the vmalloc region.
mrc p15 , 0 , i p , c2 , c0 , 1 @ read TTBR1
mcr p15 , 0 , i p , c2 , c0 , 0 @ set TTBR0
instr_ s y n c
# endif
2021-09-18 10:44:35 +02:00
@ secondary_data layout used here: +12 = stack, +16 = task.
adr_ l r7 , s e c o n d a r y _ d a t a + 1 2 @ get secondary_data.stack
ldr s p , [ r7 ]
ldr r0 , [ r7 , #4 ] @ get secondary_data.task
2010-10-04 17:56:13 +01:00
@ Zero fp to terminate backtraces here; r0 (the task pointer loaded
@ above) is the first argument to secondary_start_kernel.
mov f p , #0
b s e c o n d a r y _ s t a r t _ k e r n e l
ENDPROC( _ _ s e c o n d a r y _ s w i t c h e d )
# endif / * d e f i n e d ( C O N F I G _ S M P ) * /
/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu :
2011-11-07 18:05:53 +01:00
@ Adjust the SCTLR value in r0 per kernel config before the MMU goes
@ on: alignment checking (CR_A), D-cache (CR_C), branch prediction
@ (CR_Z) and I-cache (CR_I) bits.
# if d e f i n e d ( C O N F I G _ A L I G N M E N T _ T R A P ) & & _ _ L I N U X _ A R M _ A R C H _ _ < 6
2010-10-04 17:56:13 +01:00
orr r0 , r0 , #C R _ A
# else
bic r0 , r0 , #C R _ A
# endif
# ifdef C O N F I G _ C P U _ D C A C H E _ D I S A B L E
bic r0 , r0 , #C R _ C
# endif
# ifdef C O N F I G _ C P U _ B P R E D I C T _ D I S A B L E
bic r0 , r0 , #C R _ Z
# endif
# ifdef C O N F I G _ C P U _ I C A C H E _ D I S A B L E
bic r0 , r0 , #C R _ I
# endif
2015-04-04 20:09:46 +01:00
# ifdef C O N F I G _ A R M _ L P A E
@ LPAE: load the 64-bit TTBR0 from the r4/r5 pair in one mcrr.
mcrr p15 , 0 , r4 , r5 , c2 @ load TTBR0
# else
2015-08-21 09:23:26 +01:00
@ Classic MMU: program the domain access register, then 32-bit TTBR0.
mov r5 , #D A C R _ I N I T
2010-10-04 17:56:13 +01:00
mcr p15 , 0 , r5 , c3 , c0 , 0 @ load domain access register
mcr p15 , 0 , r4 , c2 , c0 , 0 @ load page table pointer
2011-11-22 17:30:29 +00:00
# endif
2010-10-04 17:56:13 +01:00
b _ _ t u r n _ m m u _ o n
ENDPROC( _ _ e n a b l e _ m m u )
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
.align 5
2011-11-23 12:26:25 +00:00
@ __turn_mmu_on must live in the identity-mapped .idmap.text section so
@ that fetching continues at the same physical address across the
@ instant the MMU comes on.
.pushsection .idmap .text , " ax"
ENTRY( _ _ t u r n _ m m u _ o n )
2010-10-04 17:56:13 +01:00
mov r0 , r0 @ no-op
2011-11-22 17:30:28 +00:00
instr_ s y n c
2010-10-04 17:56:13 +01:00
mcr p15 , 0 , r0 , c1 , c0 , 0 @ write control reg
@ CP15 read-back after the SCTLR write, before branching away.
mrc p15 , 0 , r3 , c0 , c0 , 0 @ read id reg
2011-11-22 17:30:28 +00:00
instr_ s y n c
2010-10-04 17:56:13 +01:00
mov r3 , r3
@ r13 holds the *virtual* continuation address; jump to it.
mov r3 , r13
2014-06-30 16:29:12 +01:00
ret r3
2011-11-23 12:03:27 +00:00
__turn_mmu_on_end :
2010-10-04 17:56:13 +01:00
ENDPROC( _ _ t u r n _ m m u _ o n )
2011-11-23 12:26:25 +00:00
.popsection
2010-10-04 17:56:13 +01:00
2005-04-16 15:20:36 -07:00
2010-09-04 10:47:48 +01:00
# ifdef C O N F I G _ S M P _ O N _ U P
2014-04-16 15:38:26 +01:00
_ _ HEAD
2010-09-04 10:47:48 +01:00
@ Decide whether this SMP kernel is actually running on SMP-capable
@ hardware (r9 = MIDR on entry); if not, branch to __fixup_smp_on_up to
@ patch the ALT_SMP/ALT_UP alternative instructions for UP operation.
__fixup_smp :
2011-01-30 16:40:20 +00:00
and r3 , r9 , #0x000f0000 @ architecture version
teq r3 , #0x000f0000 @ CPU ID supported?
2010-09-04 10:47:48 +01:00
bne _ _ f i x u p _ s m p _ o n _ u p @ no, assume UP
2011-01-30 16:40:20 +00:00
@ r3 = MIDR with variant/revision masked out (mask 0xff00fff0),
@ compared against 0x4100b020 = ARM 11MPCore.
bic r3 , r9 , #0x00ff0000
bic r3 , r3 , #0x0000000f @ mask 0xff00fff0
mov r4 , #0x41000000
2010-11-22 12:06:28 +00:00
orr r4 , r4 , #0x0000b000
2011-01-30 16:40:20 +00:00
orr r4 , r4 , #0x00000020 @ val 0x4100b020
teq r3 , r4 @ ARM 11MPCore?
2014-06-30 16:29:12 +01:00
reteq l r @ yes, assume SMP
2010-09-04 10:47:48 +01:00
mrc p15 , 0 , r0 , c0 , c0 , 5 @ read MPIDR
2011-01-30 16:40:20 +00:00
and r0 , r0 , #0xc0000000 @ multiprocessing extensions and
teq r0 , #0x80000000 @ not part of a uniprocessor system?
2013-09-27 21:56:31 +01:00
bne _ _ f i x u p _ s m p _ o n _ u p @ no, assume UP
@ Core indicates it is SMP. Check for Aegis SOC where a single
@ Cortex-A9 CPU is present but SMP operations fault.
@ (0x4100c090 = ARM Cortex-A9 part number.)
mov r4 , #0x41000000
orr r4 , r4 , #0x0000c000
orr r4 , r4 , #0x00000090
teq r3 , r4 @ Check for ARM Cortex-A9
2014-06-30 16:29:12 +01:00
retne l r @ Not ARM Cortex-A9,
2013-09-27 21:56:31 +01:00
@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
@ below address check will need to be #ifdef'd or equivalent
@ for the Aegis platform.
mrc p15 , 4 , r0 , c15 , c0 @ get SCU base address
teq r0 , #0x0 @ '0' on actual UP A9 hardware
beq _ _ f i x u p _ s m p _ o n _ u p @ So its an A9 UP
ldr r0 , [ r0 , #4 ] @ read SCU Config
2013-11-07 08:42:40 +01:00
ARM_ B E 8 ( r e v r0 , r0 ) @ byteswap if big endian
2013-09-27 21:56:31 +01:00
and r0 , r0 , #0x3 @ number of CPUs
teq r0 , #0x0 @ is 1?
2014-06-30 16:29:12 +01:00
retne l r
2010-09-04 10:47:48 +01:00
@ UP system: patch every entry in the __smpalt table.
__fixup_smp_on_up :
2020-09-14 11:25:29 +03:00
adr_ l r4 , _ _ s m p a l t _ b e g i n
adr_ l r5 , _ _ s m p a l t _ e n d
2011-02-10 15:25:18 +00:00
b _ _ d o _ f i x u p _ s m p _ o n _ u p
2010-09-04 10:47:48 +01:00
ENDPROC( _ _ f i x u p _ s m p )
@ smp_on_up: data word built via the ALT_SMP/ALT_UP alternative macros;
@ assembles as 1 for SMP and is rewritten to 0 by the UP fixup pass.
.pushsection .data
2017-07-26 12:49:31 +01:00
.align 2
2010-09-04 10:47:48 +01:00
.globl smp_on_up
smp_on_up :
ALT_ S M P ( . l o n g 1 )
ALT_ U P ( . l o n g 0 )
.popsection
2011-02-10 15:25:18 +00:00
# endif
2010-09-04 10:47:48 +01:00
2011-02-10 15:25:18 +00:00
.text
@ Patch loop over the alternatives table in [r4, r5): each 8-byte entry
@ is { target offset (relative to the entry), UP replacement insn }.
@ The replacement (r6) is stored at r0 + r4; returns (reths) once
@ r4 >= r5.  Clobbers r0, r6.
__do_fixup_smp_on_up :
cmp r4 , r5
2014-06-30 16:29:12 +01:00
reths l r
2020-09-14 11:48:20 +03:00
ldmia r4 , { r0 , r6 }
ARM( s t r r6 , [ r0 , r4 ] )
THUMB( a d d r0 , r0 , r4 )
add r4 , r4 , #8
2011-02-10 15:25:18 +00:00
# ifdef _ _ A R M E B _ _
THUMB( m o v r6 , r6 , r o r #16 ) @ Convert word order for big-endian.
2010-09-04 10:47:48 +01:00
# endif
2011-02-10 15:25:18 +00:00
THUMB( s t r h r6 , [ r0 ] , #2 ) @ For Thumb-2, store as two halfwords
2020-09-14 11:48:20 +03:00
THUMB( m o v r6 , r6 , l s r #16 ) @ to be robust against misaligned r0.
2011-02-10 15:25:18 +00:00
THUMB( s t r h r6 , [ r0 ] )
b _ _ d o _ f i x u p _ s m p _ o n _ u p
ENDPROC( _ _ d o _ f i x u p _ s m p _ o n _ u p )
@ C-callable entry: runs the UP fixup loop over the range
@ [r0, r0 + r1) — presumably fixup_smp(start, size); confirm against
@ the C prototype.  Preserves r4-r6 per the AAPCS.
ENTRY( f i x u p _ s m p )
stmfd s p ! , { r4 - r6 , l r }
mov r4 , r0
add r5 , r0 , r1
bl _ _ d o _ f i x u p _ s m p _ o n _ u p
ldmfd s p ! , { r4 - r6 , p c }
ENDPROC( f i x u p _ s m p )
2010-09-04 10:47:48 +01:00
2006-03-27 14:58:25 +01:00
# include " h e a d - c o m m o n . S "