/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved.
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */
#define MASK_U32		0x3c
#define CHACHA20_BLOCK_SIZE	64
#define STACK_SIZE		32

#define X0	$t0
#define X1	$t1
#define X2	$t2
#define X3	$t3
#define X4	$t4
#define X5	$t5
#define X6	$t6
#define X7	$t7
#define X8	$t8
#define X9	$t9
#define X10	$v1
#define X11	$s6
#define X12	$s5
#define X13	$s4
#define X14	$s3
#define X15	$s2
/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */
#define T0	$s1
#define T1	$s0
#define T(n)	T ## n
#define X(n)	X ## n

/* Input arguments */
#define STATE	$a0
#define OUT	$a1
#define IN	$a2
#define BYTES	$a3
/* Output argument */
/* NONCE[0] is kept in a register and not in memory.
 * We don't want to touch the original value in memory.
 * It must be incremented every loop iteration.
 */
#define NONCE_0		$v0

/* SAVED_X and SAVED_CA are set in the jump table.
 * Use regs which are overwritten on exit so we don't leak clear data.
 * They are used to handle the last bytes, which are not a multiple of 4.
 */
#define SAVED_X		X15
#define SAVED_CA	$s7

#define IS_UNALIGNED	$s7
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define MSB 0
#define LSB 3
#define ROTx rotl
#define ROTR(n) rotr n, 24
#define	CPU_TO_LE32(n) \
	wsbh	n; \
	rotr	n, 16;
#else
#define MSB 3
#define LSB 0
#define ROTx rotr
#define CPU_TO_LE32(n)
#define ROTR(n)
#endif
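
/* On big-endian CPUs, CPU_TO_LE32 converts a native word into the
 * little-endian byte order of the ChaCha keystream: wsbh swaps the bytes
 * within each halfword and the rotate by 16 swaps the halfwords, which
 * together is a full 32-bit byte swap. A rough C equivalent (a reference
 * sketch only, not code used by this file):
 *
 *	static inline uint32_t cpu_to_le32_sketch(uint32_t n)
 *	{
 *		// wsbh: bytes AABBCCDD -> BBAADDCC
 *		n = ((n & 0x00ff00ffu) << 8) | ((n & 0xff00ff00u) >> 8);
 *		// rotr n, 16: BBAADDCC -> DDCCBBAA
 *		return (n >> 16) | (n << 16);
 *	}
 *
 * On little-endian CPUs the macro expands to nothing.
 */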
#define FOR_EACH_WORD(x) \
	x( 0); \
	x( 1); \
	x( 2); \
	x( 3); \
	x( 4); \
	x( 5); \
	x( 6); \
	x( 7); \
	x( 8); \
	x( 9); \
	x(10); \
	x(11); \
	x(12); \
	x(13); \
	x(14); \
	x(15);

#define FOR_EACH_WORD_REV(x) \
	x(15); \
	x(14); \
	x(13); \
	x(12); \
	x(11); \
	x(10); \
	x( 9); \
	x( 8); \
	x( 7); \
	x( 6); \
	x( 5); \
	x( 4); \
	x( 3); \
	x( 2); \
	x( 1); \
	x( 0);
#define PLUS_ONE_0	 1
#define PLUS_ONE_1	 2
#define PLUS_ONE_2	 3
#define PLUS_ONE_3	 4
#define PLUS_ONE_4	 5
#define PLUS_ONE_5	 6
#define PLUS_ONE_6	 7
#define PLUS_ONE_7	 8
#define PLUS_ONE_8	 9
#define PLUS_ONE_9	10
#define PLUS_ONE_10	11
#define PLUS_ONE_11	12
#define PLUS_ONE_12	13
#define PLUS_ONE_13	14
#define PLUS_ONE_14	15
#define PLUS_ONE_15	16
#define PLUS_ONE(x)	PLUS_ONE_ ## x
#define _CONCAT3(a,b,c)	a ## b ## c
#define CONCAT3(a,b,c)	_CONCAT3(a,b,c)
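
/* PLUS_ONE and CONCAT3 build the per-word label names used by the jump
 * tables below. The store macro for word x is labelled with x+1, so the
 * plain "_0_b" labels stay free for the fall-through entry points defined
 * in the main function body. For example (illustrative expansion only):
 *
 *	CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(3), _b: ;)
 * expands to
 *	.Lchacha_mips_xor_aligned_4_b: ;
 */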
#define STORE_UNALIGNED(x) \
CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
	.if (x != 12); \
		lw	T0, (x*4)(STATE); \
	.endif; \
	lwl	T1, (x*4)+MSB ## (IN); \
	lwr	T1, (x*4)+LSB ## (IN); \
	.if (x == 12); \
		addu	X ## x, NONCE_0; \
	.else; \
		addu	X ## x, T0; \
	.endif; \
	CPU_TO_LE32(X ## x); \
	xor	X ## x, T1; \
	swl	X ## x, (x*4)+MSB ## (OUT); \
	swr	X ## x, (x*4)+LSB ## (OUT);
#define STORE_ALIGNED(x) \
CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
	.if (x != 12); \
		lw	T0, (x*4)(STATE); \
	.endif; \
	lw	T1, (x*4) ## (IN); \
	.if (x == 12); \
		addu	X ## x, NONCE_0; \
	.else; \
		addu	X ## x, T0; \
	.endif; \
	CPU_TO_LE32(X ## x); \
	xor	X ## x, T1; \
	sw	X ## x, (x*4) ## (OUT);
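
/* Both store macros implement the same per-word step of ChaCha encryption:
 * add the original state word back into the permuted word (word 12 comes
 * from NONCE_0, which the outer loop increments per block, instead of from
 * memory), convert to little-endian, then XOR with the input and store to
 * the output. A rough C sketch of one word, with hypothetical helper names
 * (reference only, not code used by this file):
 *
 *	void store_word_sketch(uint32_t x, uint32_t state_word,
 *			       const uint8_t *in, uint8_t *out)
 *	{
 *		uint32_t ks = x + state_word;	// keystream word
 *		uint8_t b[4];
 *		// keystream is serialized in little-endian byte order
 *		b[0] = ks; b[1] = ks >> 8; b[2] = ks >> 16; b[3] = ks >> 24;
 *		for (int i = 0; i < 4; i++)
 *			out[i] = in[i] ^ b[i];
 *	}
 *
 * The unaligned variant uses lwl/lwr and swl/swr so IN and OUT may have any
 * alignment; the aligned variant uses plain lw/sw.
 */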
/* Jump table macro.
 * Used for setup and handling the last bytes, which are not a multiple of 4.
 * X15 is free to store Xn.
 * Every jump table entry must be equal in size.
 */
#define JMPTBL_ALIGNED(x) \
.Lchacha_mips_jmptbl_aligned_ ## x: ; \
	.set	noreorder; \
	b	.Lchacha_mips_xor_aligned_ ## x ## _b; \
	.if (x == 12); \
		addu	SAVED_X, X ## x, NONCE_0; \
	.else; \
		addu	SAVED_X, X ## x, SAVED_CA; \
	.endif; \
	.set	reorder
#define JMPTBL_UNALIGNED(x) \
.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
	.set	noreorder; \
	b	.Lchacha_mips_xor_unaligned_ ## x ## _b; \
	.if (x == 12); \
		addu	SAVED_X, X ## x, NONCE_0; \
	.else; \
		addu	SAVED_X, X ## x, SAVED_CA; \
	.endif; \
	.set	reorder
#define AXR(A, B, C, D,  K, L, M, N,  V, W, Y, Z,  S) \
	addu	X(A), X(K); \
	addu	X(B), X(L); \
	addu	X(C), X(M); \
	addu	X(D), X(N); \
	xor	X(V), X(A); \
	xor	X(W), X(B); \
	xor	X(Y), X(C); \
	xor	X(Z), X(D); \
	rotl	X(V), S;    \
	rotl	X(W), S;    \
	rotl	X(Y), S;    \
	rotl	X(Z), S;
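
/* One AXR invocation performs one add/xor/rotate step of the ChaCha quarter
 * round on four columns (or diagonals) in parallel; four invocations make up
 * the four steps of the quarter round, and the eight calls per loop iteration
 * below are one column round plus one diagonal round, which is why the round
 * counter is decremented by 2. A C sketch of the standard quarter round the
 * sequence implements (reference only, not code used by this file):
 *
 *	#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
 *	static void quarterround_sketch(uint32_t x[16], int a, int b, int c, int d)
 *	{
 *		x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 16);
 *		x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 12);
 *		x[a] += x[b]; x[d] ^= x[a]; x[d] = ROTL32(x[d], 8);
 *		x[c] += x[d]; x[b] ^= x[c]; x[b] = ROTL32(x[b], 7);
 *	}
 */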
.text
.set reorder
.set noat
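
/* The C prototype this is expected to match is roughly
 * void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
 *                        unsigned int bytes, int nrounds)
 * as declared in the kernel's ChaCha library header (exact parameter types
 * assumed here). STATE, OUT, IN and BYTES arrive in $a0-$a3; the fifth
 * argument, the number of rounds, is passed on the stack at 16($sp) per the
 * o32 calling convention, which is why it is loaded from there below.
 */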
.globl	chacha_crypt_arch
.ent	chacha_crypt_arch
chacha_crypt_arch:
	.frame	$sp, STACK_SIZE, $ra

	/* Load number of rounds */
	lw	$at, 16($sp)

	addiu	$sp, -STACK_SIZE

	/* Return if BYTES == 0. */
	beqz	BYTES, .Lchacha_mips_end

	lw	NONCE_0, 48(STATE)
/* Save s0-s7 */
	sw	$s0,  0($sp)
	sw	$s1,  4($sp)
	sw	$s2,  8($sp)
	sw	$s3, 12($sp)
	sw	$s4, 16($sp)
	sw	$s5, 20($sp)
	sw	$s6, 24($sp)
	sw	$s7, 28($sp)

	/* Test if IN or OUT is unaligned.
	 * IS_UNALIGNED = (IN | OUT) & 0x00000003
	 */
	or	IS_UNALIGNED, IN, OUT
	andi	IS_UNALIGNED, 0x3
	b	.Lchacha_rounds_start

.align 4
.Loop_chacha_rounds:
	addiu	IN,  CHACHA20_BLOCK_SIZE
	addiu	OUT, CHACHA20_BLOCK_SIZE
	addiu	NONCE_0, 1

.Lchacha_rounds_start:
	lw	X0,   0(STATE)
	lw	X1,   4(STATE)
	lw	X2,   8(STATE)
	lw	X3,  12(STATE)
	lw	X4,  16(STATE)
	lw	X5,  20(STATE)
	lw	X6,  24(STATE)
	lw	X7,  28(STATE)
	lw	X8,  32(STATE)
	lw	X9,  36(STATE)
	lw	X10, 40(STATE)
	lw	X11, 44(STATE)

	move	X12, NONCE_0
	lw	X13, 52(STATE)
	lw	X14, 56(STATE)
	lw	X15, 60(STATE)
.Loop_chacha_xor_rounds:
	addiu	$at, -2
	AXR( 0, 1, 2, 3,  4, 5, 6, 7, 12,13,14,15, 16);
	AXR( 8, 9,10,11, 12,13,14,15,  4, 5, 6, 7, 12);
	AXR( 0, 1, 2, 3,  4, 5, 6, 7, 12,13,14,15,  8);
	AXR( 8, 9,10,11, 12,13,14,15,  4, 5, 6, 7,  7);
	AXR( 0, 1, 2, 3,  5, 6, 7, 4, 15,12,13,14, 16);
	AXR(10,11, 8, 9, 15,12,13,14,  5, 6, 7, 4, 12);
	AXR( 0, 1, 2, 3,  5, 6, 7, 4, 15,12,13,14,  8);
	AXR(10,11, 8, 9, 15,12,13,14,  5, 6, 7, 4,  7);
	bnez	$at, .Loop_chacha_xor_rounds

	addiu	BYTES, -(CHACHA20_BLOCK_SIZE)

	/* Is data src/dst unaligned? Jump */
	bnez	IS_UNALIGNED, .Loop_chacha_unaligned

	/* Set number of rounds here to fill the delay slot. */
	lw	$at, (STACK_SIZE+16)($sp)

	/* BYTES < 0, it has no full block. */
	bltz	BYTES, .Lchacha_mips_no_full_block_aligned

	FOR_EACH_WORD_REV(STORE_ALIGNED)

	/* BYTES > 0? Loop again. */
	bgtz	BYTES, .Loop_chacha_rounds

	/* Place this here to fill the delay slot. */
	addiu	NONCE_0, 1

	/* BYTES < 0? Handle the last bytes. */
	bltz	BYTES, .Lchacha_mips_xor_bytes
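/* In the store path above, BYTES is kept biased by one block:
 * CHACHA20_BLOCK_SIZE was subtracted before the stores, so BYTES > 0 means
 * more full blocks remain, BYTES == 0 means the buffer ended exactly on a
 * block boundary, and BYTES < 0 means only a partial block was left for the
 * tail handlers below. Sketch of the equivalent C control flow (reference
 * only, helper names are illustrative):
 *
 *	bytes -= CHACHA20_BLOCK_SIZE;
 *	if (bytes < 0) {                 // partial block
 *		store_full_words(...);   // via the jump table
 *		xor_tail_bytes(...);     // remaining 1-3 bytes
 *	} else if (bytes > 0) {
 *		next_block();            // back to .Loop_chacha_rounds
 *	}
 */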
.Lchacha_mips_xor_done:
	/* Restore used registers */
	lw	$s0,  0($sp)
	lw	$s1,  4($sp)
	lw	$s2,  8($sp)
	lw	$s3, 12($sp)
	lw	$s4, 16($sp)
	lw	$s5, 20($sp)
	lw	$s6, 24($sp)
	lw	$s7, 28($sp)

	/* Write NONCE_0 back to the right location in state */
	sw	NONCE_0, 48(STATE)

.Lchacha_mips_end:
	addiu	$sp, STACK_SIZE
	jr	$ra
.Lchacha_mips_no_full_block_aligned:
	/* Restore the offset on BYTES */
	addiu	BYTES, CHACHA20_BLOCK_SIZE

	/* Get number of full WORDS */
	andi	$at, BYTES, MASK_U32

	/* Load upper half of jump table addr */
	lui	T0, %hi(.Lchacha_mips_jmptbl_aligned_0)

	/* Calculate lower half jump table offset */
	ins	T0, $at, 1, 6

	/* Add offset to STATE */
	addu	T1, STATE, $at

	/* Add lower half jump table addr */
	addiu	T0, %lo(.Lchacha_mips_jmptbl_aligned_0)

	/* Read value from STATE */
	lw	SAVED_CA, 0(T1)

	/* Store remaining bytecounter as negative value */
	subu	BYTES, $at, BYTES

	jr	T0

	/* Jump table */
	FOR_EACH_WORD(JMPTBL_ALIGNED)
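
/* Dispatch math for the jump-table setup above: $at holds the number of
 * full words times 4 (BYTES & 0x3c). Each JMPTBL_ALIGNED entry is two
 * instructions (8 bytes) - that is what "Every jump table entry must be
 * equal in size" guarantees - so the byte offset into the table is $at
 * shifted left by 1, which "ins T0, $at, 1, 6" merges into the table base.
 * Roughly equivalent C (a sketch; the symbol name is illustrative):
 *
 *	uintptr_t base = (uintptr_t)&jmptbl_aligned_0;
 *	uintptr_t target = base + ((bytes & 0x3c) << 1);
 *	goto *(void *)target;	// computed goto, GCC extension
 */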
.Loop_chacha_unaligned:
	/* Set number of rounds here to fill the delay slot. */
	lw	$at, (STACK_SIZE+16)($sp)

	/* BYTES < 0, it has no full block. */
	bltz	BYTES, .Lchacha_mips_no_full_block_unaligned

	FOR_EACH_WORD_REV(STORE_UNALIGNED)

	/* BYTES > 0? Loop again. */
	bgtz	BYTES, .Loop_chacha_rounds

	/* Write NONCE_0 back to the right location in state */
	sw	NONCE_0, 48(STATE)

	.set noreorder
	/* Fall through to byte handling */
	bgez	BYTES, .Lchacha_mips_xor_done
.Lchacha_mips_xor_unaligned_0_b:
.Lchacha_mips_xor_aligned_0_b:
	/* Place this here to fill the delay slot */
	addiu	NONCE_0, 1
	.set reorder
.Lchacha_mips_xor_bytes:
	addu	IN, $at
	addu	OUT, $at
	/* First byte */
	lbu	T1, 0(IN)
	addiu	$at, BYTES, 1
	CPU_TO_LE32(SAVED_X)
	ROTR(SAVED_X)
	xor	T1, SAVED_X
	sb	T1, 0(OUT)
	beqz	$at, .Lchacha_mips_xor_done
	/* Second byte */
	lbu	T1, 1(IN)
	addiu	$at, BYTES, 2
	ROTx	SAVED_X, 8
	xor	T1, SAVED_X
	sb	T1, 1(OUT)
	beqz	$at, .Lchacha_mips_xor_done
	/* Third byte */
	lbu	T1, 2(IN)
	ROTx	SAVED_X, 8
	xor	T1, SAVED_X
	sb	T1, 2(OUT)
	b	.Lchacha_mips_xor_done
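
/* In the byte tail above: on entry $at still holds the number of full-word
 * bytes already handled by the jump table, so IN and OUT are advanced past
 * them, and BYTES holds minus the number of trailing bytes (1 to 3).
 * SAVED_X, set up by the jump table, is the keystream word covering those
 * bytes; each byte is peeled off its low end, rotating by 8 bits per step.
 * Rough C equivalent (a sketch, not code used by this file):
 *
 *	uint32_t ks = saved_x;            // keystream word, LE byte order
 *	for (int i = 0; i < tail; i++) {  // tail is 1, 2 or 3
 *		out[i] = in[i] ^ (uint8_t)ks;
 *		ks >>= 8;
 *	}
 */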
.Lchacha_mips_no_full_block_unaligned:
	/* Restore the offset on BYTES */
	addiu	BYTES, CHACHA20_BLOCK_SIZE

	/* Get number of full WORDS */
	andi	$at, BYTES, MASK_U32

	/* Load upper half of jump table addr */
	lui	T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)

	/* Calculate lower half jump table offset */
	ins	T0, $at, 1, 6

	/* Add offset to STATE */
	addu	T1, STATE, $at

	/* Add lower half jump table addr */
	addiu	T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)

	/* Read value from STATE */
	lw	SAVED_CA, 0(T1)

	/* Store remaining bytecounter as negative value */
	subu	BYTES, $at, BYTES

	jr	T0

	/* Jump table */
	FOR_EACH_WORD(JMPTBL_UNALIGNED)
.end chacha_crypt_arch
.set at
/* Input arguments
 * STATE	$a0
 * OUT		$a1
 * NROUND	$a2
 */

#undef X12
#undef X13
#undef X14
#undef X15

#define X12	$a3
#define X13	$at
#define X14	$v0
#define X15	STATE
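
/* HChaCha takes the same 16-word input state but outputs only words 0-3 and
 * 12-15 of the permuted state, with no final feed-forward addition. The
 * expected C prototype is roughly
 * void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds),
 * per the kernel's ChaCha library header (exact types assumed here).
 * Remapping X12-X15 onto $a3, $at, $v0 and STATE keeps the whole state in
 * registers without touching any callee-saved register other than $s6 (X11),
 * which is saved below.
 */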
.set noat
.globl hchacha_block_arch
.ent hchacha_block_arch
hchacha_block_arch:
	.frame	$sp, STACK_SIZE, $ra

	addiu	$sp, -STACK_SIZE

	/* Save X11(s6) */
	sw	X11, 0($sp)
	lw	X0,   0(STATE)
	lw	X1,   4(STATE)
	lw	X2,   8(STATE)
	lw	X3,  12(STATE)
	lw	X4,  16(STATE)
	lw	X5,  20(STATE)
	lw	X6,  24(STATE)
	lw	X7,  28(STATE)
	lw	X8,  32(STATE)
	lw	X9,  36(STATE)
	lw	X10, 40(STATE)
	lw	X11, 44(STATE)
	lw	X12, 48(STATE)
	lw	X13, 52(STATE)
	lw	X14, 56(STATE)
	lw	X15, 60(STATE)

.Loop_hchacha_xor_rounds:
	addiu	$a2, -2
	AXR( 0, 1, 2, 3,  4, 5, 6, 7, 12,13,14,15, 16);
	AXR( 8, 9,10,11, 12,13,14,15,  4, 5, 6, 7, 12);
	AXR( 0, 1, 2, 3,  4, 5, 6, 7, 12,13,14,15,  8);
	AXR( 8, 9,10,11, 12,13,14,15,  4, 5, 6, 7,  7);
	AXR( 0, 1, 2, 3,  5, 6, 7, 4, 15,12,13,14, 16);
	AXR(10,11, 8, 9, 15,12,13,14,  5, 6, 7, 4, 12);
	AXR( 0, 1, 2, 3,  5, 6, 7, 4, 15,12,13,14,  8);
	AXR(10,11, 8, 9, 15,12,13,14,  5, 6, 7, 4,  7);
	bnez	$a2, .Loop_hchacha_xor_rounds

	/* Restore used register */
	lw	X11, 0($sp)

	sw	X0,   0(OUT)
	sw	X1,   4(OUT)
	sw	X2,   8(OUT)
	sw	X3,  12(OUT)
	sw	X12, 16(OUT)
	sw	X13, 20(OUT)
	sw	X14, 24(OUT)
	sw	X15, 28(OUT)

	addiu	$sp, STACK_SIZE
	jr	$ra
.end hchacha_block_arch
.set at