/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */
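	/* This appears to be why the __cheetah_* variants further down run
	 * with %tl raised to 1 and demap through the PRIMARY_CONTEXT register
	 * (preserving the nucleus page size fields) instead of flushing via
	 * the secondary context the way the Spitfire code above does.
	 */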
	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to screw this up for anybody...
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	sethi		%hi(KERNBASE), %g3
	flush		%g3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	andn		%o1, 1, %o3
	be,pn		%icc, 1f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop

__spitfire_flush_tlb_mm_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1
	add		%o0, %g1, %o0
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3
	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0
	retl
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous
	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm: /* 19 insns */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3
	ldxa		[%o2] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU
	sethi		%hi(KERNBASE), %o2
	flush		%o2
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	be,pn		%icc, 1f
	 andn		%o1, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	/* Hypervisor specific versions, patched at boot time. */
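	/* All of the sun4v routines below use the hypervisor trap convention
	 * visible in the code itself: the fast-trap API number goes in %o5
	 * for HV_FAST_TRAP (or is implied by the dedicated trap number for
	 * HV_MMU_UNMAP_ADDR_TRAP), arguments go in %o0-%o3, and a non-zero
	 * status returned in %o0 is routed to the error reporting helpers.
	 */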
__hypervisor_tlb_tl0_error:
	save		%sp, -192, %sp
	mov		%i0, %o0
	call		hypervisor_tlbop_error
	 mov		%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm: /* 10 insns */
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
__hypervisor_flush_tlb_page: /* 11 insns */
	/* %o0 = context, %o1 = vaddr */
	mov		%o0, %g2
	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop

__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1
	mov		%o2, %g2
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g1, 1b
	 nop
	retl
	 nop

__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sethi		%hi(PAGE_SIZE), %g3
	mov		%o0, %g1
	sub		%o1, %g1, %g2
	sub		%g2, %g3, %g2
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g2, 1b
	 sub		%g2, %g3, %g2
2:	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif
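
	/* tlb_patch_one copies %o2 instructions from the routine at %o1
	 * over the routine at %o0, flushing the I-cache one word at a
	 * time so the newly written instructions are visible to fetch.
	 * A rough C sketch of the same loop (illustrative only; the
	 * flush_icache_word() helper name here is hypothetical):
	 *
	 *	static void tlb_patch_one(u32 *dst, const u32 *src,
	 *				  unsigned int insns)
	 *	{
	 *		while (insns--) {
	 *			*dst = *src++;		// copy one instruction word
	 *			flush_icache_word(dst++);  // make it visible to I-fetch
	 *		}
	 *	}
	 */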
tlb_patch_one:
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2
	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	scratch 1
	 *   %g2	scratch 2
	 *   %g3	scratch 3
	 *   %g4	scratch 4
	 */
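	/* Because these handlers run straight out of the cross-call trap
	 * vector with no register window or stack set up, they work only
	 * in the global registers listed above; the hypervisor variants
	 * further below stash the live %o registers into globals around
	 * each hypervisor call and restore them before the retry.
	 */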
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.globl		xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 17 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
	andcc		%g1, 0x1, %g0
	be,pn		%icc, 2f
	 andn		%g1, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%g4] ASI_DMMU
	retry
	nop
	nop

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	.globl		xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
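	/* The next few instructions briefly rotate %cwp back by one so
	 * that %i7 of the previous register window (one more frame of the
	 * interrupted call chain) can be sampled into GR_SNAP_RPC, and then
	 * immediately restore %cwp.
	 */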
	rdpr		%cwp, %g3
	sub		%g3, 1, %g7
	wrpr		%g7, %cwp
	mov		%i7, %g7
	wrpr		%g3, %cwp
	stx		%g7, [%g1 + GR_SNAP_RPC]

	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry
	.globl		xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rd		%pic, %g7
	stx		%g7, [%g1 + (4 * 8)]
	rd		%pcr, %g7
	stx		%g7, [%g1 + (0 * 8)]
	retry

	.globl		xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1

	ldxa		[%g0] ASI_PIC, %g7
	stx		%g7, [%g1 + (4 * 8)]
	mov		0x08, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (5 * 8)]
	mov		0x10, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (6 * 8)]
	mov		0x18, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (7 * 8)]

	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o5, %g7

	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		3, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (3 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		2, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (2 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		1, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (1 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		0, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (0 * 8)]

	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g7, %o5

	retry
#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparator
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2
	cmp		%g2, %g1
	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop
	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry
	.globl		__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%g1, %o0	/* ARG0: virtual address */
	mov		%g5, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	mov		%o0, %g2
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3
	mov		%g2, %o0
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry
	/* These just get rescheduled to PIL vectors. */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */
	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		10, %o2
	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
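	/* Next: patch __flush_tlb_pending (16 insns). */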
	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		16, %o2
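	/* Next: patch __flush_tlb_kernel_range (16 insns). */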
	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		16, %o2
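	/* When D-cache aliasing is possible, also patch __flush_dcache_page (2 insns). */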
#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */
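	/* Under CONFIG_SMP, patch the cross-call (xcall) flush routines as well. */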
#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		21, %o2
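	/* Next: patch xcall_flush_tlb_page (17 insns). */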
	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		17, %o2
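	/* Finally: patch xcall_flush_tlb_kernel_range (25 insns). */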
	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		25, %o2
#endif /* CONFIG_SMP */
ret
restore