/*
 * This is a SIMD SHA-1 implementation. It requires the Intel(R) Supplemental
 * SSE3 instruction set extensions introduced in Intel Core Microarchitecture
 * processors. CPUs supporting Intel(R) AVX extensions will get an additional
 * boost.
 *
 * This work was inspired by the vectorized implementation of Dean Gaudet.
 * Additional information on it can be found at:
 *    http://www.arctic.org/~dean/crypto/sha1.html
 *
 * It was improved upon with more efficient vectorization of the message
 * scheduling. This implementation has also been optimized for all current and
 * several future generations of Intel CPUs.
 *
 * See this article for more information about the implementation details:
 *   http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/
 *
 * Copyright (C) 2010, Intel Corp.
 *   Authors: Maxim Locktyukhin <maxim.locktyukhin@intel.com>
 *            Ronen Zohar <ronen.zohar@intel.com>
 *
 * Converted to AT&T syntax and adapted for inclusion in the Linux kernel:
 *   Author: Mathias Krause <minipli@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/linkage.h>
#define CTX	%rdi	// arg1
#define BUF	%rsi	// arg2
#define CNT	%rdx	// arg3

#define REG_A	%ecx
#define REG_B	%esi
#define REG_C	%edi
#define REG_D	%ebp
#define REG_E	%edx

#define REG_T1	%eax
#define REG_T2	%ebx

#define K_BASE		%r8
#define HASH_PTR	%r9
#define BUFFER_PTR	%r10
#define BUFFER_END	%r11

#define W_TMP1	%xmm0
#define W_TMP2	%xmm9

#define W0	%xmm1
#define W4	%xmm2
#define W8	%xmm3
#define W12	%xmm4
#define W16	%xmm5
#define W20	%xmm6
#define W24	%xmm7
#define W28	%xmm8

#define XMM_SHUFB_BSWAP	%xmm10
/* we keep a window of 64 bytes (the 16 most recent w[i]+K values) in a circular buffer */
#define WK(t)	(((t) & 15) * 4)(%rsp)

#define W_PRECALC_AHEAD	16
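
/*
 * For example, WK(0) and WK(16) both resolve to 0(%rsp): the window holds
 * only the 16 most recent w[i]+K values, and with W_PRECALC_AHEAD == 16 each
 * slot is consumed by its round before the look-ahead pre-calc overwrites it.
 */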
/*
 * This macro implements the SHA-1 function's body for a single 64-byte block
 * param: the function's name
 */
.macro SHA1_VECTOR_ASM name
	ENTRY(\name)
	push	%rbx
	push	%rbp
	push	%r12

	mov	%rsp, %r12
	sub	$64, %rsp		# allocate workspace
	and	$~15, %rsp		# align stack

	mov	CTX, HASH_PTR
	mov	BUF, BUFFER_PTR

	shl	$6, CNT			# multiply by 64
	add	BUF, CNT
	mov	CNT, BUFFER_END

	lea	K_XMM_AR(%rip), K_BASE
	xmm_mov	BSWAP_SHUFB_CTL(%rip), XMM_SHUFB_BSWAP

	SHA1_PIPELINED_MAIN_BODY

	# cleanup workspace
	mov	$8, %ecx
	mov	%rsp, %rdi
	xor	%rax, %rax
	rep stosq

	mov	%r12, %rsp		# deallocate workspace

	pop	%r12
	pop	%rbp
	pop	%rbx
	ret
	ENDPROC(\name)
.endm
/*
 * This macro implements 80 rounds of SHA-1 for one 64-byte block
 */
.macro SHA1_PIPELINED_MAIN_BODY
	INIT_REGALLOC

	mov	  (HASH_PTR), A
	mov	 4(HASH_PTR), B
	mov	 8(HASH_PTR), C
	mov	12(HASH_PTR), D
	mov	16(HASH_PTR), E
.set i, 0
.rept W_PRECALC_AHEAD
	W_PRECALC i
	.set i, (i+1)
.endr
.align 4
1:
	RR F1,A,B,C,D,E,0
	RR F1,D,E,A,B,C,2
	RR F1,B,C,D,E,A,4
	RR F1,E,A,B,C,D,6
	RR F1,C,D,E,A,B,8

	RR F1,A,B,C,D,E,10
	RR F1,D,E,A,B,C,12
	RR F1,B,C,D,E,A,14
	RR F1,E,A,B,C,D,16
	RR F1,C,D,E,A,B,18

	RR F2,A,B,C,D,E,20
	RR F2,D,E,A,B,C,22
	RR F2,B,C,D,E,A,24
	RR F2,E,A,B,C,D,26
	RR F2,C,D,E,A,B,28

	RR F2,A,B,C,D,E,30
	RR F2,D,E,A,B,C,32
	RR F2,B,C,D,E,A,34
	RR F2,E,A,B,C,D,36
	RR F2,C,D,E,A,B,38

	RR F3,A,B,C,D,E,40
	RR F3,D,E,A,B,C,42
	RR F3,B,C,D,E,A,44
	RR F3,E,A,B,C,D,46
	RR F3,C,D,E,A,B,48

	RR F3,A,B,C,D,E,50
	RR F3,D,E,A,B,C,52
	RR F3,B,C,D,E,A,54
	RR F3,E,A,B,C,D,56
	RR F3,C,D,E,A,B,58

	add	$64, BUFFER_PTR		# move to the next 64-byte block
	cmp	BUFFER_END, BUFFER_PTR	# if the current is the last one use
	cmovae	K_BASE, BUFFER_PTR	# dummy source to avoid buffer overrun

	RR F4,A,B,C,D,E,60
	RR F4,D,E,A,B,C,62
	RR F4,B,C,D,E,A,64
	RR F4,E,A,B,C,D,66
	RR F4,C,D,E,A,B,68

	RR F4,A,B,C,D,E,70
	RR F4,D,E,A,B,C,72
	RR F4,B,C,D,E,A,74
	RR F4,E,A,B,C,D,76
	RR F4,C,D,E,A,B,78

	UPDATE_HASH   (HASH_PTR), A
	UPDATE_HASH  4(HASH_PTR), B
	UPDATE_HASH  8(HASH_PTR), C
	UPDATE_HASH 12(HASH_PTR), D
	UPDATE_HASH 16(HASH_PTR), E

	RESTORE_RENAMED_REGS
	cmp	K_BASE, BUFFER_PTR	# K_BASE means we reached the end
	jne	1b
.endm
.macro INIT_REGALLOC
	.set A, REG_A
	.set B, REG_B
	.set C, REG_C
	.set D, REG_D
	.set E, REG_E
	.set T1, REG_T1
	.set T2, REG_T2
.endm
.macro RESTORE_RENAMED_REGS
	# order is important (REG_C is where it should be)
	mov	B, REG_B
	mov	D, REG_D
	mov	A, REG_A
	mov	E, REG_E
.endm
.macro SWAP_REG_NAMES a, b
	.set _T, \a
	.set \a, \b
	.set \b, _T
.endm
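
/*
 * For reference, the boolean functions from the SHA-1 specification that the
 * F1..F4 macros below compute into T1 (a C-like sketch, not part of the code):
 *
 *	F1(b, c, d) = (b & c) | (~b & d)	// == d ^ (b & (c ^ d))
 *	F2(b, c, d) = b ^ c ^ d
 *	F3(b, c, d) = (b & c) | (b & d) | (c & d)
 *	F4(b, c, d) = F2(b, c, d)
 *
 * F1 uses the xor/and/xor form so that only one scratch register is needed.
 */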
.macro F1 b, c, d
	mov	\c, T1
	SWAP_REG_NAMES \c, T1
	xor	\d, T1
	and	\b, T1
	xor	\d, T1
.endm

.macro F2 b, c, d
	mov	\d, T1
	SWAP_REG_NAMES \d, T1
	xor	\c, T1
	xor	\b, T1
.endm

.macro F3 b, c, d
	mov	\c, T1
	SWAP_REG_NAMES \c, T1
	mov	\b, T2
	or	\b, T1
	and	\c, T2
	and	\d, T1
	or	T2, T1
.endm

.macro F4 b, c, d
	F2	\b, \c, \d
.endm

.macro UPDATE_HASH hash, val
	add	\hash, \val
	mov	\val, \hash
.endm
/*
 * RR does two rounds of SHA-1 back to back with W[] pre-calc
 *   t1 = F(b, c, d);   e += w(i)
 *   e += t1;           b <<= 30;   d  += w(i+1);
 *   t1 = F(a, b, c);
 *   d += t1;           a <<= 5;
 *   e += a;
 *   t1 = e;            a >>= 7;
 *   t1 <<= 5;
 *   d += t1;
 */
.macro RR F, a, b, c, d, e, round
	add	WK(\round), \e
	\F   \b, \c, \d		# t1 = F(b, c, d);
	W_PRECALC (\round + W_PRECALC_AHEAD)
	rol	$30, \b
	add	T1, \e
	add	WK(\round + 1), \d

	\F   \a, \b, \c
	W_PRECALC (\round + W_PRECALC_AHEAD + 1)
	rol	$5, \a
	add	\a, \e
	add	T1, \d
	ror	$7, \a		# (a <<r 5) >>r 7 => a <<r 30

	mov	\e, T1
	SWAP_REG_NAMES \e, T1

	rol	$5, T1
	add	T1, \d

	# write:	\a, \b
	# rotate:	\a<=\d, \b<=\e, \c<=\a, \d<=\b, \e<=\c
.endm
.macro W_PRECALC r
	.set i, \r

	.if (i < 20)
		.set K_XMM, 0
	.elseif (i < 40)
		.set K_XMM, 16
	.elseif (i < 60)
		.set K_XMM, 32
	.elseif (i < 80)
		.set K_XMM, 48
	.endif

	.if ((i < 16) || ((i >= 80) && (i < (80 + W_PRECALC_AHEAD))))
		.set i, ((\r) % 80)	# pre-compute for the next iteration
		.if (i == 0)
			W_PRECALC_RESET
		.endif
		W_PRECALC_00_15
	.elseif (i < 32)
		W_PRECALC_16_31
	.elseif (i < 80)		// rounds 32-79
		W_PRECALC_32_79
	.endif
.endm
.macro W_PRECALC_RESET
	.set W,          W0
	.set W_minus_04, W4
	.set W_minus_08, W8
	.set W_minus_12, W12
	.set W_minus_16, W16
	.set W_minus_20, W20
	.set W_minus_24, W24
	.set W_minus_28, W28
	.set W_minus_32, W
.endm
.macro W_PRECALC_ROTATE
	.set W_minus_32, W_minus_28
	.set W_minus_28, W_minus_24
	.set W_minus_24, W_minus_20
	.set W_minus_20, W_minus_16
	.set W_minus_16, W_minus_12
	.set W_minus_12, W_minus_08
	.set W_minus_08, W_minus_04
	.set W_minus_04, W
	.set W,          W_minus_32
.endm
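
/*
 * The eight W* names above form a ring: each rotation shifts every name one
 * slot older and recycles the oldest register (W_minus_32 aliases W) as the
 * new W, so after eight rotations the mapping is back to its
 * W_PRECALC_RESET state.
 */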
.macro W_PRECALC_SSSE3
.macro W_PRECALC_00_15
	W_PRECALC_00_15_SSSE3
.endm
.macro W_PRECALC_16_31
	W_PRECALC_16_31_SSSE3
.endm
.macro W_PRECALC_32_79
	W_PRECALC_32_79_SSSE3
.endm
/* message scheduling pre-compute for rounds 0-15 */
.macro W_PRECALC_00_15_SSSE3
	.if ((i & 3) == 0)
		movdqu	(i*4)(BUFFER_PTR), W_TMP1
	.elseif ((i & 3) == 1)
		pshufb	XMM_SHUFB_BSWAP, W_TMP1
		movdqa	W_TMP1, W
	.elseif ((i & 3) == 2)
		paddd	(K_BASE), W_TMP1
	.elseif ((i & 3) == 3)
		movdqa	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
/* message scheduling pre-compute for rounds 16-31
 *
 * - calculate the last 32 w[i] values in 8 XMM registers
 * - pre-calculate the K+w[i] values and store them to memory, for a later
 *   load by the ALU add instruction
 *
 * some "heavy-lifting" vectorization is needed for rounds 16-31 due to the
 * w[i]->w[i-3] dependency, but it improves rounds 32-79
 */
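
/*
 * Scalar reference for what one 4-wide iteration below computes (a C-like
 * sketch, assuming rol32() rotates a 32-bit word left):
 *
 *	for (j = i; j < i + 4; j++)
 *		w[j] = rol32(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
 *
 * For the last lane (j = i+3), w[j-3] = w[i] is produced by the same vector
 * iteration; that lane is computed with the missing term zeroed and patched
 * up afterwards, which is where the extra shift/xor steps come from.
 */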
.macro W_PRECALC_16_31_SSSE3
	# blended scheduling of vector and scalar instruction streams, one 4-wide
	# vector iteration / 4 scalar rounds
	.if ((i & 3) == 0)
		movdqa	W_minus_12, W
		palignr	$8, W_minus_16, W	# w[i-14]
		movdqa	W_minus_04, W_TMP1
		psrldq	$4, W_TMP1		# w[i-3]
		pxor	W_minus_08, W
	.elseif ((i & 3) == 1)
		pxor	W_minus_16, W_TMP1
		pxor	W_TMP1, W
		movdqa	W, W_TMP2
		movdqa	W, W_TMP1
		pslldq	$12, W_TMP2
	.elseif ((i & 3) == 2)
		psrld	$31, W
		pslld	$1, W_TMP1
		por	W, W_TMP1
		movdqa	W_TMP2, W
		psrld	$30, W_TMP2
		pslld	$2, W
	.elseif ((i & 3) == 3)
		pxor	W, W_TMP1
		pxor	W_TMP2, W_TMP1
		movdqa	W_TMP1, W
		paddd	K_XMM(K_BASE), W_TMP1
		movdqa	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
/* message scheduling pre-compute for rounds 32-79
 *
 * in the SHA-1 specification: w[i] = (w[i-3] ^ w[i-8]  ^ w[i-14] ^ w[i-16]) rol 1
 * instead we use the equal:   w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2
 * which allows more efficient vectorization since the w[i]=>w[i-3] dependency
 * is broken
 */
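
/*
 * The equivalence can be cross-checked with a C-like sketch (assuming a
 * rol32() helper; not part of the code):
 *
 *	u32 w[80];	// w[0] .. w[15] = any message block
 *	for (i = 16; i < 80; i++)
 *		w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);
 *	for (i = 32; i < 80; i++)
 *		assert(w[i] == rol32(w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32], 2));
 */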
.macro W_PRECALC_32_79_SSSE3
	.if ((i & 3) == 0)
		movdqa	W_minus_04, W_TMP1
		pxor	W_minus_28, W		# W is W_minus_32 before xor
		palignr	$8, W_minus_08, W_TMP1
	.elseif ((i & 3) == 1)
		pxor	W_minus_16, W
		pxor	W_TMP1, W
		movdqa	W, W_TMP1
	.elseif ((i & 3) == 2)
		psrld	$30, W
		pslld	$2, W_TMP1
		por	W, W_TMP1
	.elseif ((i & 3) == 3)
		movdqa	W_TMP1, W
		paddd	K_XMM(K_BASE), W_TMP1
		movdqa	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
.endm		// W_PRECALC_SSSE3
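
/* the four SHA-1 round constants, replicated across all four XMM lanes below */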
#define K1	0x5a827999
#define K2	0x6ed9eba1
#define K3	0x8f1bbcdc
#define K4	0xca62c1d6
.section .rodata
.align 16
K_XMM_AR:
	.long K1, K1, K1, K1
	.long K2, K2, K2, K2
	.long K3, K3, K3, K3
	.long K4, K4, K4, K4
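
/*
 * pshufb control mask that reverses the byte order within each 32-bit word,
 * i.e. performs the big-endian load of the little-endian message words
 */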
BSWAP_SHUFB_CTL:
.long 0x00010203
.long 0x04050607
.long 0x08090a0b
.long 0x0c0d0e0f
.section .text
W_PRECALC_SSSE3
.macro xmm_mov a, b
	movdqu	\a,\b
.endm
/* SSSE3 optimized implementation:
 * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws,
 *					unsigned int rounds);
 */
SHA1_VECTOR_ASM	sha1_transform_ssse3
#ifdef CONFIG_AS_AVX
.macro W_PRECALC_AVX
.purgem W_PRECALC_00_15
.macro W_PRECALC_00_15
	W_PRECALC_00_15_AVX
.endm
.purgem W_PRECALC_16_31
.macro W_PRECALC_16_31
	W_PRECALC_16_31_AVX
.endm
.purgem W_PRECALC_32_79
.macro W_PRECALC_32_79
	W_PRECALC_32_79_AVX
.endm
.macro W_PRECALC_00_15_AVX
	.if ((i & 3) == 0)
		vmovdqu	(i*4)(BUFFER_PTR), W_TMP1
	.elseif ((i & 3) == 1)
		vpshufb	XMM_SHUFB_BSWAP, W_TMP1, W
	.elseif ((i & 3) == 2)
		vpaddd	(K_BASE), W, W_TMP1
	.elseif ((i & 3) == 3)
		vmovdqa	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
.macro W_PRECALC_16_31_AVX
	.if ((i & 3) == 0)
		vpalignr $8, W_minus_16, W_minus_12, W	# w[i-14]
		vpsrldq	$4, W_minus_04, W_TMP1		# w[i-3]
		vpxor	W_minus_08, W, W
		vpxor	W_minus_16, W_TMP1, W_TMP1
	.elseif ((i & 3) == 1)
		vpxor	W_TMP1, W, W
		vpslldq	$12, W, W_TMP2
		vpslld	$1, W, W_TMP1
	.elseif ((i & 3) == 2)
		vpsrld	$31, W, W
		vpor	W, W_TMP1, W_TMP1
		vpslld	$2, W_TMP2, W
		vpsrld	$30, W_TMP2, W_TMP2
	.elseif ((i & 3) == 3)
		vpxor	W, W_TMP1, W_TMP1
		vpxor	W_TMP2, W_TMP1, W
		vpaddd	K_XMM(K_BASE), W, W_TMP1
		vmovdqu	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
.macro W_PRECALC_32_79_AVX
	.if ((i & 3) == 0)
		vpalignr $8, W_minus_08, W_minus_04, W_TMP1
		vpxor	W_minus_28, W, W	# W is W_minus_32 before xor
	.elseif ((i & 3) == 1)
		vpxor	W_minus_16, W_TMP1, W_TMP1
		vpxor	W_TMP1, W, W
	.elseif ((i & 3) == 2)
		vpslld	$2, W, W_TMP1
		vpsrld	$30, W, W
		vpor	W, W_TMP1, W
	.elseif ((i & 3) == 3)
		vpaddd	K_XMM(K_BASE), W, W_TMP1
		vmovdqu	W_TMP1, WK(i&~3)
		W_PRECALC_ROTATE
	.endif
.endm
.endm		// W_PRECALC_AVX

W_PRECALC_AVX
.purgem xmm_mov
.macro xmm_mov a, b
	vmovdqu	\a,\b
.endm
/* AVX optimized implementation:
 * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws,
 *				      unsigned int rounds);
 */
SHA1_VECTOR_ASM	sha1_transform_avx
#endif