/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>

#include "proc-macros.S"

/*
 *	v7_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
ENTRY(v7_flush_icache_all)
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	mov	pc, lr
ENDPROC(v7_flush_icache_all)
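
/*
 * Note on ALT_SMP()/ALT_UP() (see <asm/assembler.h>): on SMP kernels the
 * inner-shareable form is assembled and, roughly speaking, patched back to
 * the UP form at boot if the hardware turns out to be uniprocessor; on UP
 * kernels only the UP form is emitted.  The same pattern recurs below.
 */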

/*
 *	v7_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	ands	r3, r0, #0x7000000		@ extract loc from clidr
	mov	r3, r3, lsr #23			@ left align loc bit field
	beq	finished			@ if loc is 0, then no need to clean
	mov	r10, #0				@ start clean at cache level 0
loop1:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
#ifdef CONFIG_PREEMPT
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
loop2:
	mov	r9, r4				@ create working copy of max way size
loop3:
 ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 THUMB(	lsl	r6, r9, r5		)
 THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
 ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
 THUMB(	lsl	r6, r7, r2		)
 THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the way
	bge	loop3
	subs	r7, r7, #1			@ decrement the index
	bge	loop2
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	loop1
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb
	isb
	mov	pc, lr
ENDPROC(v7_flush_dcache_all)
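
/*
 * Illustrative only (not assembled): a C rendering of how the loop above
 * packs one clean/invalidate-by-set/way operand from the CSIDR value read
 * for the selected level.  The helper name and types are ours, not a
 * kernel API:
 *
 *	static inline unsigned int v7_set_way_op(unsigned int csidr,
 *						 unsigned int level,
 *						 unsigned int way,
 *						 unsigned int set)
 *	{
 *		unsigned int line_shift = (csidr & 7) + 4;	// log2(line bytes)
 *		unsigned int max_way = (csidr >> 3) & 0x3ff;	// associativity - 1
 *		unsigned int op = (set << line_shift) | (level << 1);
 *
 *		if (max_way)		// ways live in the top bits of the word
 *			op |= way << __builtin_clz(max_way);
 *		return op;
 *	}
 *
 * which the code above then issues with "mcr p15, 0, op, c7, c14, 2".
 */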

/*
 *	v7_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is now achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based cache
 *	maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 *
 */
ENTRY(v7_flush_kern_cache_all)
 ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	v7_flush_dcache_all
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	mov	pc, lr
ENDPROC(v7_flush_kern_cache_all)
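
/*
 * Reached from C as flush_cache_all(); with the multi-cache glue at the
 * bottom of this file it is wired up through cpu_cache_fns.flush_kern_all.
 * A typical (illustrative) caller:
 *
 *	flush_cache_all();	// e.g. on CPU shutdown or kexec paths
 */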

/*
 *	v7_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v7_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v7_flush_user_cache_range)
	mov	pc, lr
ENDPROC(v7_flush_user_cache_all)
ENDPROC(v7_flush_user_cache_range)
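
/*
 * Both entries above are deliberately empty: the ARMv7 data cache is
 * physically tagged (PIPT, or non-aliasing VIPT), so nothing needs to be
 * flushed when an address space is torn down or a user range changes.
 */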

/*
 *	v7_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v7_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
 USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
	dsb
	isb
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
	mov	r0, #-EFAULT
	mov	pc, lr
 UNWIND(.fnend		)
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
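
/*
 * Usage note (illustrative, not assembled): callers that write instructions
 * into memory and then execute them reach the routines above via
 * flush_icache_range().  A minimal C sketch, with code_buf/new_insns/size
 * as placeholder names:
 *
 *	memcpy(code_buf, new_insns, size);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + size);
 */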

/*
 *	v7_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_flush_kern_dcache_area)
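
/*
 * Reached from C as __cpuc_flush_dcache_area(addr, size); for example the
 * flush_dcache_page() path uses it to push a page's dirty lines back to
 * memory.  Illustrative call (page/PAGE_SIZE as the usual kernel symbols):
 *
 *	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 */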

/*
 *	v7_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	tst	r1, r3
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
1:
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_inv_range)
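
/*
 * Why the two "mcrne" instructions above: when start or end is not
 * cache-line aligned, the partial lines at either edge may still hold
 * dirty data belonging to neighbouring buffers, so those lines are cleaned
 * as well as invalidated.  A C-like sketch, with clean_inv_line() and
 * inv_line() as placeholder names for the DCCIMVAC/DCIMVAC operations:
 *
 *	mask = linesize - 1;
 *	if (start & mask)
 *		clean_inv_line(start & ~mask);
 *	if (end & mask)
 *		clean_inv_line(end & ~mask);
 *	for (addr = start & ~mask; addr < (end & ~mask); addr += linesize)
 *		inv_line(addr);		// interior lines are simply dropped
 */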

/*
 *	v7_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_clean_range)

/*
 *	v7_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb
	mov	pc, lr
ENDPROC(v7_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v7_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7_dma_inv_range
	b	v7_dma_clean_range
ENDPROC(v7_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v7_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7_dma_inv_range
	mov	pc, lr
ENDPROC(v7_dma_unmap_area)
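
/*
 * These two entries back the streaming DMA API (dma_map_single() and
 * friends) through cpu_cache_fns.dma_map_area/dma_unmap_area.  An
 * illustrative driver-side sequence, with dev/buf/len as placeholders:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	// ... device writes into the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */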

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7