// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>

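/*
 * The PadLock SHA engine is only used to produce the final digest.
 * Partial hashing (init/update/export/import) is delegated to a software
 * fallback shash whose descriptor is embedded in struct padlock_sha_desc;
 * the hardware is invoked once, in finup()/final(), on the remaining data.
 */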
struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	return crypto_shash_import(&dctx->fallback, in);
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

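/*
 * The finup() helpers first export the fallback's partial state, top the
 * buffered data up to a block boundary in software if needed, and then let
 * the PadLock engine hash the remaining bytes and apply the final padding.
 * The resulting digest words are byte-swapped into *out by
 * padlock_output_block().
 */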
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int err;

	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

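	/*
	 * REP XSHA1: ESI points at the remaining input and EDI at the aligned
	 * buffer holding the intermediate digest (which also receives the
	 * result).  EAX carries the number of bytes already folded into that
	 * state and ECX the total message length, so the engine hashes the
	 * rest and appends the final padding itself.
	 */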
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int err;

	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
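	/*
	 * The fallback's descriptor state lives directly behind struct
	 * padlock_sha_desc inside our shash descriptor, so advertise a
	 * descriptor size large enough for both.
	 */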
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha1_finup,
	.final		= padlock_sha1_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= padlock_sha_init,
	.update		= padlock_sha_update,
	.finup		= padlock_sha256_finup,
	.final		= padlock_sha256_final,
	.export		= padlock_sha_export,
	.import		= padlock_sha_import,
	.descsize	= sizeof(struct padlock_sha_desc),
	.statesize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-padlock",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct padlock_sha_ctx),
		.cra_module		= THIS_MODULE,
		.cra_init		= padlock_cra_init,
		.cra_exit		= padlock_cra_exit,
	}
};

/*
 * Two additional shash_alg instances for the hardware multi-part hash
 * implemented by the VIA Nano processor.
 */
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

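/*
 * The Nano update helpers invoke REP XSHA1/XSHA256 with EAX = -1 and ECX
 * set to the number of complete 64-byte blocks, which processes those
 * blocks without finalizing the digest.  The trailing partial block and
 * the MD-style padding are then handled in software by the *_final_nano()
 * helpers.
 */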
static int padlock_sha1_update_nano(struct shash_desc *desc,
				    const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long
	 * and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Flush the partial block buffered in the state together
		 * with new data. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
			       done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
				      : "+S"(src), "+D"(dst)
				      : "a"((long)-1), "c"((unsigned long)1));
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining full blocks of input data. */
		if (len - done >= SHA1_BLOCK_SIZE) {
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
				      : "+S"(src), "+D"(dst)
				      : "a"((long)-1),
					"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
	};

	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
				      unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long
	 * and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Flush the partial block buffered in the state together
		 * with new data. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
			       done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
				      : "+S"(src), "+D"(dst)
				      : "a"((long)-1), "c"((unsigned long)1));
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining full blocks of input data. */
		if (len - done >= SHA256_BLOCK_SIZE) {
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
				      : "+S"(src), "+D"(dst)
				      : "a"((long)-1),
					"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

static int padlock_sha_export_nano(struct shash_desc *desc,
				   void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc,
				   const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

static struct shash_alg sha1_alg_nano = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= padlock_sha1_init_nano,
	.update		= padlock_sha1_update_nano,
	.final		= padlock_sha1_final_nano,
	.export		= padlock_sha_export_nano,
	.import		= padlock_sha_import_nano,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-padlock-nano",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize	= SHA256_DIGEST_SIZE,
	.init		= padlock_sha256_init_nano,
	.update		= padlock_sha256_update_nano,
	.final		= padlock_sha256_final_nano,
	.export		= padlock_sha_export_nano,
	.import		= padlock_sha_import_nano,
	.descsize	= sizeof(struct sha256_state),
	.statesize	= sizeof(struct sha256_state),
	.base		= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sha256-padlock-nano",
		.cra_priority		= PADLOCK_CRA_PRIORITY,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

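/*
 * Matching on the PHE CPUID feature lets the module be autoloaded on CPUs
 * that advertise the PadLock Hash Engine.
 */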
static const struct x86_cpu_id padlock_sha_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_PHE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);

static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
		return -ENODEV;

	/* Register the Nano-specific algorithms when running on a VIA Nano
	 * processor, otherwise register the original implementations. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);

out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("sha1-padlock");
MODULE_ALIAS_CRYPTO("sha256-padlock");