/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
2005-04-16 15:20:36 -07:00
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
#ifdef CONFIG_ARM_LPAE
	/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE	0x5000
#define PMD_ORDER	3
#else
#define PG_DIR_SIZE	0x4000
#define PMD_ORDER	2
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
ARM: P2V: eliminate head.S use of PHYS_OFFSET for !XIP_KERNEL
head.S makes use of PHYS_OFFSET. When it becomes a variable, the
assembler won't understand this. Compute PHYS_OFFSET by the following
method. This code is linked at its virtual address, but run at before
the MMU is enabled, so at his physical address.
1: .long .
.long PAGE_OFFSET
adr r0, 1b @ r0 = physical ','
ldmia r0, {r1, r2} @ r1 = virtual '.', r2 = PAGE_OFFSET
sub r1, r0, r1 @ r1 = physical-virtual
add r2, r2, r1 @ r2 = PAGE_OFFSET + physical-virtual
@ := PHYS_OFFSET.
Switch XIP users of PHYS_OFFSET to use PLAT_PHYS_OFFSET - we can't
use this method for XIP kernels as the code doesn't execute in RAM.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:04:00 +00:00
.macro pgtbl, r d , p h y s
2014-01-21 16:25:34 +01:00
add \ r d , \ p h y s , #T E X T _ O F F S E T
sub \ r d , \ r d , #P G _ D I R _ S I Z E
2005-04-16 15:20:36 -07:00
.endm
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	.arm

	__HEAD
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install
#endif
	@ ensure svc mode and all interrupts masked
	safe_svcmode_maskall r9

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
	mrc	p15, 0, r3, c0, c1, 4		@ read ID_MMFR0
	and	r3, r3, #0xf			@ extract VMSA support
	cmp	r3, #5				@ long-descriptor translation table format?
 THUMB( it	lo )				@ force fixup-able long branch encoding
	blo	__error_lpae			@ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
	/* Compute PHYS_OFFSET at run time: the code is linked at its
	 * virtual address but executes at its physical address, so the
	 * delta between "adr" (physical) and the stored link-time value
	 * (virtual) gives phys-virt; add PAGE_OFFSET to get PHYS_OFFSET. */
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif

	/*
	 * r1 = machine no, r2 = atags or dtb,
	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
	bl	__fixup_pv_table
#endif
	bl	__create_page_tables

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.
	 *
	 * The processor init function will be called with:
	 *  r1 - machine type
	 *  r2 - boot data (atags/dt) pointer
	 *  r4 - translation table base (low word)
	 *  r5 - translation table base (high word, if LPAE)
	 *  r8 - translation table base 1 (pfn if LPAE)
	 *  r9 - cpuid
	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
	 *
	 * On return, the CPU will be ready for the MMU to be turned on,
	 * r0 will hold the CPU control register value, r1, r2, r4, and
	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
	badr	lr, 1f				@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
	mov	r5, #0				@ high TTBR0
	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
#else
	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.
	.long	PAGE_OFFSET
#endif
2005-04-16 15:20:36 -07:00
/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address

	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD table (first level) to point to the PMD table. A PGD
	 * entry is 64-bit wide.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b

	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b

	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
	ldr	r6, =(_end - 1)
	orr	r3, r8, r7
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	str	r3, [r0], #1 << PMD_ORDER
	add	r3, r3, #1 << SECTION_SHIFT
	cmp	r0, r6
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT
	add	r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif

	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	mov	r0, r2, lsr #SECTION_SHIFT
	movs	r0, r0, lsl #SECTION_SHIFT
	subne	r3, r0, r8
	addne	r3, r3, #PAGE_OFFSET
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
	orrne	r6, r7, r0
	strne	r6, [r3], #1 << PMD_ORDER
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0

	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER

	add	r0, r4, r3
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN
	str	r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
#endif
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end
2005-04-16 15:20:36 -07:00
2010-10-04 17:56:13 +01:00
# if d e f i n e d ( C O N F I G _ S M P )
2013-07-31 11:37:17 +01:00
.text
2015-01-31 00:25:30 +01:00
.arm
2015-05-18 09:04:31 +01:00
ENTRY( s e c o n d a r y _ s t a r t u p _ a r m )
2015-04-21 14:17:25 +01:00
THUMB( b a d r r9 , 1 f ) @ Kernel is entered in ARM.
2015-01-31 00:25:30 +01:00
THUMB( b x r9 ) @ If this is a Thumb-2 kernel,
THUMB( . t h u m b ) @ switch to Thumb now.
THUMB( 1 : )
2010-10-04 17:56:13 +01:00
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */

 ARM_BE8(setend	be)				@ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary	@ if booted in HYP, drop to SVC via stub
#endif
	safe_svcmode_maskall r9			@ SVC mode, IRQ/FIQ masked (clobbers r9)

	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ r10 = procinfo; invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
 THUMB( it	eq )		@ force fixup-able long branch encoding
	beq	__error_p

	/*
	 * Use the page tables supplied from __cpu_up.
	 *
	 * Compute the runtime (physical) address of secondary_data from the
	 * link-time address stored in the __secondary_data table: lr holds
	 * the phys-virt delta, so r3 = &secondary_data in the current
	 * (pre-MMU) address space.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ r5 = link addr, r7 = secondary_data,
						@ r12 = __secondary_switched (jump target)
	sub	lr, r4, r5			@ lr = phys - virt delta
	add	r3, r7, lr			@ r3 = phys &secondary_data
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir (64-bit load)
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
ARM_BE8(eor	r4, r4, r5)			@ without using a temp reg.
	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
						@ (return control reg)
	ret	r12
ENDPROC(secondary_startup)
2015-01-31 00:25:30 +01:00
ENDPROC( s e c o n d a r y _ s t a r t u p _ a r m )
2010-10-04 17:56:13 +01:00
	/*
	 * Entered with the MMU on (jumped to via r13 from __turn_mmu_on).
	 *
	 * r7 = &secondary_data (virtual) - NOTE(review): an older comment here
	 * said r6; the code below reads through r7, as loaded by
	 * secondary_startup from the __secondary_data table.
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #12]			@ get secondary_data.stack
	mov	fp, #0				@ terminate backtraces for this CPU
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
2010-11-29 19:43:24 +01:00
	.align

	/*
	 * Literal table consumed by secondary_startup:
	 *   word 0: link-time address of this table (".") - used to compute
	 *           the phys-virt delta before the MMU is on
	 *   word 1: address of secondary_data
	 *   word 2: address of __secondary_switched
	 */
	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */
/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A			@ pre-v6: enable alignment faulting
#else
	bic	r0, r0, #CR_A			@ v6+: unaligned access is handled in hw
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
	mcrr	p15, 0, r4, r5, c2		@ load TTBR0 (64-bit, r5:r4)
#else
	mov	r5, #DACR_INIT
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
#endif
	b	__turn_mmu_on
ENDPROC(__enable_mmu)
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 *
 * This code must live in the identity-mapped section (.idmap.text) so
 * that execution can continue across the virtual->physical switch.
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(__turn_mmu_on)
	mov	r0, r0				@ pipeline padding
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg (MMU goes on here)
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg (serialises the CP15 write)
	instr_sync
	mov	r3, r3				@ pipeline padding
	mov	r3, r13				@ fetch the virtual continuation address
	ret	r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
	.popsection
2010-10-04 17:56:13 +01:00
2005-04-16 15:20:36 -07:00
2010-09-04 10:47:48 +01:00
#ifdef CONFIG_SMP_ON_UP
	__HEAD
/*
 * Decide whether the CPU we are booting on is actually SMP-capable.
 * On entry r9 = processor ID (MIDR).  Falls through to
 * __fixup_smp_on_up to patch out SMP-only instructions when running
 * on a uniprocessor; returns (via lr) without patching when SMP.
 */
__fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
	bne	__fixup_smp_on_up	@ no, assume UP

	bic	r3, r9, #0x00ff0000
	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4100b020
	teq	r3, r4			@ ARM 11MPCore?
	reteq	lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
	teq	r0, #0x80000000		@ not part of a uniprocessor system?
	bne	__fixup_smp_on_up	@ no, assume UP

	@ Core indicates it is SMP. Check for Aegis SOC where a single
	@ Cortex-A9 CPU is present but SMP operations fault.
	mov	r4, #0x41000000
	orr	r4, r4, #0x0000c000
	orr	r4, r4, #0x00000090
	teq	r3, r4			@ Check for ARM Cortex-A9
	retne	lr			@ Not ARM Cortex-A9,

	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
	@ below address check will need to be #ifdef'd or equivalent
	@ for the Aegis platform.
	mrc	p15, 4, r0, c15, c0	@ get SCU base address
	teq	r0, #0x0		@ '0' on actual UP A9 hardware
	beq	__fixup_smp_on_up	@ So its an A9 UP
	ldr	r0, [r0, #4]		@ read SCU Config
ARM_BE8(rev	r0, r0)			@ byteswap if big endian
	and	r0, r0, #0x3		@ number of CPUs
	teq	r0, #0x0		@ is 1?
	retne	lr

/*
 * Running on UP: set up r3 (runtime-link delta), r4/r5 (patch table
 * bounds, from the literal pool at 1f) and fall into the patch loop.
 */
__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3		@ runtime - link-time offset
	add	r4, r4, r3		@ __smpalt_begin (runtime)
	add	r5, r5, r3		@ __smpalt_end (runtime)
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)
2010-11-29 19:43:24 +01:00
	.align
	/* Literal pool for __fixup_smp_on_up: link-time "." plus the
	 * SMP-alternatives table bounds. */
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	/* Runtime flag consumed by the ALT_SMP/ALT_UP macros: 1 = SMP. */
	.pushsection .data
	.align	2
	.globl	smp_on_up
smp_on_up:
	ALT_SMP(.long	1)
	ALT_UP(.long	0)
	.popsection
#endif
2010-09-04 10:47:48 +01:00
2011-02-10 15:25:18 +00:00
	.text
/*
 * Patch loop for SMP-on-UP: each table entry is a pair
 * { insn address, replacement UP instruction }.
 *
 *  r3 = offset to add to each recorded address (0 if already runtime)
 *  r4 = table cursor, r5 = table end
 * Returns via lr when r4 >= r5.  Clobbers r0, r6.
 */
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr			@ done (unsigned r4 >= r5)
	ldmia	r4!, {r0, r6}		@ r0 = target addr, r6 = UP replacement
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
/*
 * C-callable wrapper (e.g. for module loading):
 *   void fixup_smp(table_start, table_size)
 * Table addresses are already runtime addresses, so r3 (delta) = 0.
 */
ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0			@ r0 = table start
	add	r5, r0, r1		@ r1 = table size
	mov	r3, #0			@ no offset
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
2010-09-04 10:47:48 +01:00
2013-10-29 07:29:56 +01:00
/*
 * Byte offsets of the low/high 32-bit halves of the 64-bit __pv_offset
 * variable, selected by the build-time endianness.
 */
#ifdef __ARMEB__
#define LOW_OFFSET	0x4
#define HIGH_OFFSET	0x0
#else
#define LOW_OFFSET	0x0
#define HIGH_OFFSET	0x4
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/* __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand.  The stub instruction
 * has a form of '(add|sub) rd, rn, #imm'.
 *
 * On entry r8 = PHYS_OFFSET.  All addresses from the literal pool at 1f
 * are link-time values and must be offset by r3 before use.
 */
	__HEAD
__fixup_pv_table:
	adr	r0, 1f
	ldmia	r0, {r3-r7}
	mvn	ip, #0				@ ip = 0xffffffff (sign extension)
	subs	r3, r0, r3			@ PHYS_OFFSET - PAGE_OFFSET
	add	r4, r4, r3			@ adjust table start address
	add	r5, r5, r3			@ adjust table end address
	add	r6, r6, r3			@ adjust __pv_phys_pfn_offset address
	add	r7, r7, r3			@ adjust __pv_offset address
	mov	r0, r8, lsr #PAGE_SHIFT		@ convert to PFN
	str	r0, [r6]			@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
	strcc	ip, [r7, #HIGH_OFFSET]		@ save to __pv_offset high bits
						@ (CC from the subs: negative delta)
	mov	r6, r3, lsr #24			@ constant for add/sub instructions
	teq	r3, r6, lsl #24 		@ must be 16MiB aligned
THUMB(	it	ne		@ cross section branch )
	bne	__error
	str	r3, [r7, #LOW_OFFSET]		@ save to __pv_offset low bits
	b	__fixup_a_pv_table
ENDPROC(__fixup_pv_table)

	.align
	/* Literal pool: link-time "." plus the pv stub table bounds and
	 * the variables patched above. */
1:	.long	.
	.long	__pv_table_begin
	.long	__pv_table_end
2:	.long	__pv_phys_pfn_offset
	.long	__pv_offset
	.text
/*
 * Patch every add/sub pv stub recorded in the table [r4, r5) with the
 * current pv offset.
 *
 *  r3 = offset applied to each recorded stub address
 *  r4 = table cursor, r5 = table end
 * Reads __pv_offset via the literal at 3f.  Clobbers r0, r6, r7, ip.
 *
 * If the pv offset is negative (high word == -1, tested by cmn r0, #1)
 * the stub's mov is rewritten to mvn by setting the relevant opcode bit
 * (bit 21 in Thumb-2, bit 22 in ARM encoding).
 */
__fixup_a_pv_table:
	adr	r0, 3f
	ldr	r6, [r0]
	add	r6, r6, r3			@ runtime &__pv_offset
	ldr	r0, [r6, #HIGH_OFFSET]		@ pv_offset high word
	ldr	r6, [r6, #LOW_OFFSET]		@ pv_offset low word
	mov	r6, r6, lsr #24			@ 8-bit rotated constant
	cmn	r0, #1				@ offset negative?
#ifdef CONFIG_THUMB2_KERNEL
	moveq	r0, #0x200000			@ set bit 21, mov to mvn instruction
	lsls	r6, #24
	beq	2f
	/* Build the Thumb-2 modified-immediate encoding of the constant. */
	clz	r7, r6
	lsr	r6, #24
	lsl	r6, r7
	bic	r6, #0x0080
	lsrs	r7, #1
	orrcs	r6, #0x0080
	orr	r6, r6, r7, lsl #12
	orr	r6, #0x4000
	b	2f
1:	add	r7, r3
	ldrh	ip, [r7, #2]
ARM_BE8(rev16	ip, ip)
	tst	ip, #0x4000
	and	ip, #0x8f00
	orrne	ip, r6	@ mask in offset bits 31-24
	orreq	ip, r0	@ mask in offset bits 7-0
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7, #2]
	bne	2f
	/* mov -> mvn also needs the first halfword's opcode bit flipped. */
	ldrh	ip, [r7]
ARM_BE8(rev16	ip, ip)
	bic	ip, #0x20
	orr	ip, ip, r0, lsr #16
ARM_BE8(rev16	ip, ip)
	strh	ip, [r7]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4			@ use branch for delay slot
	bcc	1b
	bx	lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
	moveq	r0, #0x00004000			@ set bit 22, mov to mvn instruction
#else
	moveq	r0, #0x400000			@ set bit 22, mov to mvn instruction
#endif
	b	2f
1:	ldr	ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
	@ in BE8, we load data in BE, but instructions still in LE
	bic	ip, ip, #0xff000000
	tst	ip, #0x000f0000			@ check the rotation field
	orrne	ip, ip, r6, lsl #24		@ mask in offset bits 31-24
	biceq	ip, ip, #0x00004000		@ clear bit 22
	orreq	ip, ip, r0			@ mask in offset bits 7-0
#else
	bic	ip, ip, #0x000000ff
	tst	ip, #0xf00			@ check the rotation field
	orrne	ip, ip, r6			@ mask in offset bits 31-24
	biceq	ip, ip, #0x400000		@ clear bit 22
	orreq	ip, ip, r0			@ mask in offset bits 7-0
#endif
	str	ip, [r7, r3]
2:	cmp	r4, r5
	ldrcc	r7, [r4], #4			@ use branch for delay slot
	bcc	1b
	ret	lr
#endif
ENDPROC(__fixup_a_pv_table)
2013-10-29 07:29:56 +01:00
	.align
	/* Link-time address of __pv_offset, used by __fixup_a_pv_table. */
3:	.long	__pv_offset
ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching
This idea came from Nicolas, Eric Miao produced an initial version,
which was then rewritten into this.
Patch the physical to virtual translations at runtime. As we modify
the code, this makes it incompatible with XIP kernels, but allows us
to achieve this with minimal loss of performance.
As many translations are of the form:
physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
virtual = physical - (PHYS_OFFSET - PAGE_OFFSET)
we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET
- PAGE_OFFSET) by comparing the address prior to MMU initialization with
where it should be once the MMU has been initialized, and place this
constant into the above add/sub instructions.
Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.
At present, we are unable to support Realview with Sparsemem enabled
as this uses a complex mapping function, and MSM as this requires a
constant which will not fit in our math instruction.
Add a module version magic string for this feature to prevent
incompatible modules being loaded.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:09:43 +00:00
@ fixup_pv_table(table_start, table_size)
@ In:  r0 = start of a p2v fixup table, r1 = size of that table in bytes
@ Sets up the register arguments expected by __fixup_a_pv_table and
@ calls it to patch the phys<->virt translation instructions referenced
@ by the table.  NOTE(review): __fixup_a_pv_table's body is above this
@ view - presumably it consumes r3 (offset), r4 (table start) and
@ r5 (table end); confirm against its implementation.
ENTRY( f i x u p _ p v _ t a b l e )
stmfd s p ! , { r4 - r7 , l r }	@ save callee-saved r4-r7 and return address
mov r3 , #0 @ no offset
mov r4 , r0 @ r0 = table start
add r5 , r0 , r1 @ r1 = table size
bl _ _ f i x u p _ a _ p v _ t a b l e	@ patch every entry in [r4, r5)
ldmfd s p ! , { r4 - r7 , p c }	@ restore regs; pop saved lr straight into pc to return
ENDPROC( f i x u p _ p v _ t a b l e )
.data
2017-07-26 12:49:31 +01:00
.align 2
ARM: Better virt_to_page() handling
virt_to_page() is incredibly inefficient when virt-to-phys patching is
enabled. This is because we end up with this calculation:
page = &mem_map[asm virt_to_phys(addr) >> 12 - __pv_phys_offset >> 12]
in assembly. The asm virt_to_phys() is equivalent to this operation:
addr - PAGE_OFFSET + __pv_phys_offset
and we can see that because this is assembly, the compiler has no chance
to optimise some of that away. This should reduce down to:
page = &mem_map[(addr - PAGE_OFFSET) >> 12]
for the common cases. Permit the compiler to make this optimisation by
giving it more of the information it needs - do this by providing a
virt_to_pfn() macro.
Another issue which makes this more complex is that __pv_phys_offset is
a 64-bit type on all platforms. This is needlessly wasteful - if we
store the physical offset as a PFN, we can save a lot of work having
to deal with 64-bit values, which sometimes ends up producing incredibly
horrid code:
a4c: e3009000 movw r9, #0
a4c: R_ARM_MOVW_ABS_NC __pv_phys_offset
a50: e3409000 movt r9, #0 ; r9 = &__pv_phys_offset
a50: R_ARM_MOVT_ABS __pv_phys_offset
a54: e3002000 movw r2, #0
a54: R_ARM_MOVW_ABS_NC __pv_phys_offset
a58: e3402000 movt r2, #0 ; r2 = &__pv_phys_offset
a58: R_ARM_MOVT_ABS __pv_phys_offset
a5c: e5999004 ldr r9, [r9, #4] ; r9 = high word of __pv_phys_offset
a60: e3001000 movw r1, #0
a60: R_ARM_MOVW_ABS_NC mem_map
a64: e592c000 ldr ip, [r2] ; ip = low word of __pv_phys_offset
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2014-03-25 19:45:31 +00:00
@ __pv_phys_pfn_offset: globally visible 32-bit word, statically zero.
@ NOTE(review): presumably written during early boot with the physical
@ start of RAM expressed as a PFN, for the C-level phys/virt helpers to
@ read - confirm against the initialisation code above this view.
.globl __pv_phys_pfn_offset
.type _ _ pv_ p h y s _ p f n _ o f f s e t , % o b j e c t
__pv_phys_pfn_offset :
.word 0	@ 4-byte object; .size below is derived from '.' so layout must not change
.size _ _ pv_ p h y s _ p f n _ o f f s e t , . - _ _ p v _ p h y s _ p f n _ o f f s e t
2013-07-29 20:26:22 +05:30
@ Export __pv_offset (defined below) and mark it as a data object so
@ the symbol table carries the correct type for linkers/debuggers.
.globl __pv_offset
.type _ _ pv_ o f f s e t , % o b j e c t
ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching
This idea came from Nicolas, Eric Miao produced an initial version,
which was then rewritten into this.
Patch the physical to virtual translations at runtime. As we modify
the code, this makes it incompatible with XIP kernels, but allows us
to achieve this with minimal loss of performance.
As many translations are of the form:
physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
virtual = physical - (PHYS_OFFSET - PAGE_OFFSET)
we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET
- PAGE_OFFSET) by comparing the address prior to MMU initialization with
where it should be once the MMU has been initialized, and place this
constant into the above add/sub instructions.
Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.
At present, we are unable to support Realview with Sparsemem enabled
as this uses a complex mapping function, and MSM as this requires a
constant which will not fit in our math instruction.
Add a module version magic string for this feature to prevent
incompatible modules being loaded.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:09:43 +00:00
@ __pv_offset: 64-bit quantity, statically zero.  NOTE(review):
@ presumably patched at boot with the runtime
@ (PHYS_OFFSET - PAGE_OFFSET) delta used by the phys<->virt
@ translation code - confirm against the patching code above this view.
__pv_offset :
2013-07-29 20:26:22 +05:30
.quad 0	@ 8-byte object; keep .size consistent with the data emitted here
.size _ _ pv_ o f f s e t , . - _ _ p v _ o f f s e t
ARM: P2V: introduce phys_to_virt/virt_to_phys runtime patching
This idea came from Nicolas, Eric Miao produced an initial version,
which was then rewritten into this.
Patch the physical to virtual translations at runtime. As we modify
the code, this makes it incompatible with XIP kernels, but allows us
to achieve this with minimal loss of performance.
As many translations are of the form:
physical = virtual + (PHYS_OFFSET - PAGE_OFFSET)
virtual = physical - (PHYS_OFFSET - PAGE_OFFSET)
we generate an 'add' instruction for __virt_to_phys(), and a 'sub'
instruction for __phys_to_virt(). We calculate at run time (PHYS_OFFSET
- PAGE_OFFSET) by comparing the address prior to MMU initialization with
where it should be once the MMU has been initialized, and place this
constant into the above add/sub instructions.
Once we have (PHYS_OFFSET - PAGE_OFFSET), we can calculate the real
PHYS_OFFSET as PAGE_OFFSET is a build-time constant, and save this for
the C-mode PHYS_OFFSET variable definition to use.
At present, we are unable to support Realview with Sparsemem enabled
as this uses a complex mapping function, and MSM as this requires a
constant which will not fit in our math instruction.
Add a module version magic string for this feature to prevent
incompatible modules being loaded.
Tested-by: Tony Lindgren <tony@atomide.com>
Reviewed-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2011-01-04 19:09:43 +00:00
# endif
2006-03-27 14:58:25 +01:00
# include " h e a d - c o m m o n . S "