/*
 * linux/arch/arm/mm/cache-v7m.S
 *
 * Based on linux/arch/arm/mm/cache-v7.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"
/* Generic V7M read/write macros for memory mapped cache operations */

/*
 * v7m_cache_read - read a memory-mapped SCB cache register into \rt.
 *
 * \rt:  destination register; also used as scratch for the address.
 * \reg: register offset from BASEADDR_V7M_SCB.
 *
 * On v7-M, cache maintenance uses memory-mapped System Control Block
 * registers instead of the cp15 instructions used on v7-A/R, hence the
 * movw/movt address construction followed by a plain load.
 */
.macro	v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

/*
 * v7m_cacheop - perform a cache operation by storing \rt to an SCB register.
 *
 * \rt:  operand for the operation (MVA or set/way encoding).
 * \tmp: scratch register used to build the SCB register address (corrupted).
 * \op:  operation register offset from BASEADDR_V7M_SCB.
 * \c:   optional condition-code suffix applied to all three instructions;
 *       defaults to "al" (always).
 */
.macro	v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm

/* read_ccsidr - read the Cache Size ID Register into \rt */
.macro	read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

/* read_clidr - read the Cache Level ID Register into \rt */
.macro	read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

/* write_csselr - select which cache CCSIDR describes; \tmp is corrupted */
.macro	write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm
/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 *
 * Generated for every condition-code suffix (dccimvac, dccimvaceq, ...)
 * so callers can predicate the whole operation, mirroring the cp15
 * conditional forms available on v7-A/R.
 */
.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.macro dcimvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
.endm

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm
/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data ignored by ICIALLU(IS), so can be used for the address.
 * rt is zeroed on return.
 */
.macro	invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data ignored by BPIALL, so it can be used for the address.
 * rt is zeroed on return.
 */
.macro	invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm
/*
 * v7m_invalidate_l1()
 *
 * Invalidate the whole L1 data cache by set/way, without cleaning.
 * Geometry (NumSets, NumWays, line size) is decoded from CCSIDR after
 * selecting level 0 via CSSELR.
 *
 * Corrupted registers: r0-r6
 */
ENTRY(v7m_invalidate_l1)
	mov	r0, #0
	write_csselr r0, r1		@ select L1 data/unified cache
	read_ccsidr r0			@ r0 = cache geometry
	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13	@ NumSets - 1 (CCSIDR[27:13])
	movw	r1, #0x3ff
	and	r3, r1, r0, lsr #3	@ NumWays - 1 (CCSIDR[12:3])
	add	r2, r2, #1		@ NumSets
	and	r0, r0, #0x7		@ line size field (log2(words) - 2)
	add	r0, r0, #4		@ SetShift = log2(line bytes)
	clz	r1, r3			@ WayShift: ways packed at top of word
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6			@ invalidate this set/way
	bgt	2b			@ next way (flags from the subs above)
	cmp	r2, #0
	bgt	1b			@ next set
	dsb	st			@ complete all invalidates before return
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)
/*
 * v7m_flush_icache_all()
 *
 * Flush the whole I-cache.
 *
 * Registers:
 *	r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
ENDPROC(v7m_flush_icache_all)
/*
 * v7m_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Walks every cache level reported by CLIDR up to the Level of Coherency,
 * cleaning and invalidating each level by set/way (inner levels first).
 *
 * Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sych the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPT
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)
/*
 * v7m_flush_kern_cache_all()
 *
 * Flush the entire cache system.
 * The data cache flush is now achieved using atomic clean/invalidates
 * working outwards from L1 cache. This is done using Set/Way based cache
 * maintenance instructions.
 * The instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 *
 * Saves/restores the callee-clobbered registers corrupted by
 * v7m_flush_dcache_all (r4-r7, r9-r11).
 */
ENTRY(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
ENDPROC(v7m_flush_kern_cache_all)
/*
 * v7m_flush_user_cache_all()
 *
 * Flush all cache entries in a particular address space.
 *
 * - mm    - mm_struct describing address space
 *
 * Implemented as a no-op (falls through to the plain "ret" below).
 */
ENTRY(v7m_flush_user_cache_all)
	/*FALLTHROUGH*/
/*
 * v7m_flush_user_cache_range(start, end, flags)
 *
 * Flush a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache, so no flush is required here.
 */
ENTRY(v7m_flush_user_cache_range)
	ret	lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)
/*
 * v7m_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * v7m_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region. This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 *
 * Pass 1 cleans each D-cache line to the PoU, pass 2 invalidates the
 * corresponding I-cache lines, then the branch predictor is invalidated.
 */
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3		@ r12 = start rounded down to a D line
1:
/*
 * We use open coded version of dccmvau otherwise USER() would
 * point at movw instruction.
 */
	dccmvau	r12, r3			@ clean D line to PoU
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst			@ cleans visible before I-side ops
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3		@ r12 = start rounded down to an I line
2:
	icimvau	r12, r3			@ invalidate I line to PoU
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0		@ stale branch predictions may remain
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)
/*
 * v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - addr  - kernel address
 * - size  - region size
 */
ENTRY(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1		@ r1 = end address
	sub	r3, r2, #1
	bic	r0, r0, r3		@ align start down to a cache line
1:
	dccimvac r0, r3		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st			@ drain writes before returning
	ret	lr
ENDPROC(v7m_flush_kern_dcache_area)
/*
 * v7m_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * Unaligned start/end lines are cleaned+invalidated (not just
 * invalidated) so that dirty data sharing those lines with the
 * buffer is not lost.
 */
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3			@ start mid-line?
	bic	r0, r0, r3
	dccimvacne r0, r3		@ then clean+invalidate that line
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3			@ end mid-line?
	bic	r1, r1, r3
	dccimvacne r1, r3		@ then clean+invalidate that line too
1:
	dcimvac r0, r3			@ invalidate interior lines
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)
/*
 * v7m_dma_clean_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * Clean (write back, without invalidating) each line in the region
 * to the PoC ahead of a DMA read from memory by the device.
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3		@ align start down to a cache line
1:
	dccmvac r0, r3			@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)
/*
 * v7m_dma_flush_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * Clean and invalidate each line in the region to the PoC.
 */
ENTRY(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3		@ align start down to a cache line
1:
	dccimvac r0, r3			@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_flush_range)
/*
 * dma_map_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 *
 * FROM_DEVICE: invalidate only; otherwise (TO_DEVICE/BIDIRECTIONAL)
 * clean so the device sees up-to-date memory. Tail-calls the range op
 * with (start, end) in r0/r1.
 */
ENTRY(v7m_dma_map_area)
	add	r1, r1, r0		@ r1 = end = start + size
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)
/*
 * dma_unmap_area(start, size, dir)
 * - start   - kernel virtual start address
 * - size    - size of region
 * - dir     - DMA direction
 *
 * After the device may have written memory (anything but TO_DEVICE),
 * invalidate so the CPU re-reads it; TO_DEVICE needs nothing on unmap.
 */
ENTRY(v7m_dma_unmap_area)
	add	r1, r1, r0		@ r1 = end = start + size
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
ENDPROC(v7m_dma_unmap_area)
	/* "louis" (Level of Unification Inner Shareable) variant is simply
	   aliased to the full flush on v7-M. */
	.globl	v7m_flush_kern_cache_louis
	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7m