/*
 * arch/arm/mm/proc-v7-3level.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 * based on arch/arm/mm/proc-v7-2level.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <asm/assembler.h>

#define TTB_IRGN_NC	(0 << 8)
#define TTB_IRGN_WBWA	(1 << 8)
#define TTB_IRGN_WT	(2 << 8)
#define TTB_IRGN_WB	(3 << 8)
#define TTB_RGN_NC	(0 << 10)
#define TTB_RGN_OC_WBWA	(1 << 10)
#define TTB_RGN_OC_WT	(2 << 10)
#define TTB_RGN_OC_WB	(3 << 10)
#define TTB_S		(3 << 12)
#define TTB_EAE		(1 << 31)

/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP	(TTB_IRGN_WB|TTB_RGN_OC_WB)
#define PMD_FLAGS_UP	(PMD_SECT_WB)

/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP	(TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
#define PMD_FLAGS_SMP	(PMD_SECT_WBWA|PMD_SECT_S)
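
/*
 * pgd_phys is a 64-bit argument, so it is passed in the r0/r1 pair (AAPCS);
 * which register holds the low word depends on endianness, hence the
 * rpgdl/rpgdh aliases.
 */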
#ifndef __ARMEB__
#define rpgdl	r0
#define rpgdh	r1
#else
#define rpgdl	r1
#define rpgdh	r0
#endif

/*
 * cpu_v7_switch_mm(pgd_phys, tsk)
 *
 * Set the translation table base pointer to be pgd_phys (physical address of
 * the new TTB).
 */
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
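	/*
	 * With LPAE, the ASID is carried in the upper word of TTBR0, so the
	 * context switch is a single 64-bit write of the ASID plus the pgd
	 * physical address.
	 */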
	mmid	r2, r2
	asid	r2, r2
	orr	rpgdh, rpgdh, r2, lsl #(48 - 32)	@ upper 32-bits of pgd
	mcrr	p15, 0, rpgdl, rpgdh, c2		@ set TTB 0
	isb
#endif
	ret	lr
ENDPROC(cpu_v7_switch_mm)
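
/*
 * Like pgd_phys above, the 64-bit pte argument to cpu_v7_set_pte_ext() is
 * split across a register pair (r2/r3), with the low/high halves swapped
 * on big-endian.
 */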
#ifdef __ARMEB__
#define rl	r3
#define rh	r2
#else
#define rl	r2
#define rh	r3
#endif

/*
 * cpu_v7_set_pte_ext(ptep, pte)
 *
 * Set a level 2 translation table entry.
 * - ptep - pointer to level 3 translation table entry
 * - pte - PTE value to store (64-bit in r2 and r3)
 */
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
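	/*
	 * Fix up the Linux PTE bits before storing the hardware entry:
	 * entries that are !L_PTE_VALID or L_PTE_NONE are stored with the
	 * valid bit clear, and clean (!L_PTE_DIRTY) entries are made
	 * read-only so that the first write faults and the page can be
	 * marked dirty.
	 */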
	tst	rl, #L_PTE_VALID
	beq	1f
	tst	rh, #1 << (57 - 32)		@ L_PTE_NONE
	bicne	rl, #L_PTE_VALID
	bne	1f
	tst	rh, #1 << (55 - 32)		@ L_PTE_DIRTY
	orreq	rl, #L_PTE_RDONLY

1:	strd	r2, r3, [r0]
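	/*
	 * SMP configurations perform cacheable, coherent page table walks,
	 * so no maintenance is needed; on UP, clean the D-cache line holding
	 * the PTE so that the table walker sees the update.
	 */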
	ALT_SMP(W(nop))
	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
#endif
	ret	lr
ENDPROC(cpu_v7_set_pte_ext)

/*
 * Memory region attributes for LPAE (defined in pgtable-3level.h):
 *
 *   n = AttrIndx[2:0]
 *
 *			n	MAIR
 *   UNCACHED		000	00000000
 *   BUFFERABLE		001	01000100
 *   DEV_WC		001	01000100
 *   WRITETHROUGH	010	10101010
 *   WRITEBACK		011	11101110
 *   DEV_CACHED		011	11101110
 *   DEV_SHARED		100	00000100
 *   DEV_NONSHARED	100	00000100
 *   unused		101
 *   unused		110
 *   WRITEALLOC		111	11111111
 */
.equ	PRRR,	0xeeaa4400			@ MAIR0
.equ	NMRR,	0xff000004			@ MAIR1
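
/*
 * The PRRR/NMRR symbols are reused to hold MAIR0/MAIR1: each byte is the
 * attribute for one AttrIndx value, so MAIR0 = 0xeeaa4400 covers indices
 * 0-3 (0x00, 0x44, 0xaa, 0xee) and MAIR1 = 0xff000004 covers indices 4-7
 * (0x04, 0x00, 0x00, 0xff), matching the table above.
 */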

/*
 * Macro for setting up the TTBRx and TTBCR registers.
 * - \ttbr1 updated.
 */
	.macro	v7_ttb_setup, zero, ttbr0, ttbr1, tmp
	ldr	\tmp, =swapper_pg_dir		@ swapper_pg_dir virtual address
	mov	\tmp, \tmp, lsr #ARCH_PGD_SHIFT
	cmp	\ttbr1, \tmp			@ PHYS_OFFSET > PAGE_OFFSET?
	mrc	p15, 0, \tmp, c2, c0, 2		@ TTB control register
	orr	\tmp, \tmp, #TTB_EAE
	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP)
	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP)
	ALT_SMP(orr	\tmp, \tmp, #TTB_FLAGS_SMP << 16)
	ALT_UP(orr	\tmp, \tmp, #TTB_FLAGS_UP << 16)
	/*
	 * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above),
	 * otherwise booting secondary CPUs would end up using TTBR1 for the
	 * identity mapping set up in TTBR0.
	 */
	orrls	\tmp, \tmp, #TTBR1_SIZE				@ TTBCR.T1SZ
	mcr	p15, 0, \tmp, c2, c0, 2				@ TTBCR
	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
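	/* with split TTBRs, point TTBR1 at the pgd entries covering the kernel mapping */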
	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
	mcrr	p15, 1, \ttbr1, \zero, c2			@ load TTBR1
	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits
	mcrr	p15, 0, \ttbr0, \zero, c2			@ load TTBR0
.endm

	/*
	 *   AT
	 *  TFR   EV X F   IHD LR    S
	 * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
	 *   11    0 110    1  0011 1100 .111 1101 < we want
	 */
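/*
 * crval (defined in proc-macros.S): 'clear' is the set of SCTLR bits
 * cleared by __v7_setup, 'mmuset' the bits set on CONFIG_MMU kernels and
 * 'ucset' the bits set on !CONFIG_MMU kernels.
 */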
	.align	2
	.type	v7_crval, #object
v7_crval:
	crval	clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c