// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10
/* context flags */
#define FLAGS_FINUP		16

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE		1
#define OP_FINAL		2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256
struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	int			offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	int			err;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long		flags;
	int			fallback_sz;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
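
/*
 * omap_sham_copy_hash_omap2() moves the intermediate digest between the
 * driver context and the IDIGEST registers: @out != 0 saves the hash from
 * hardware, @out == 0 restores a previously saved hash into hardware.
 */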
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}
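
/*
 * Copy the final digest into req->result, converting from the hardware
 * word order. OMAP2-style IP stores SHA1 words big endian (FLAGS_BE32_SHA1);
 * everything else is little endian.
 */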
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}
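
/*
 * Power up the accelerator via runtime PM and mark the device initialized
 * on first use. The matching pm_runtime_put_autosuspend() happens in
 * omap_sham_finish_req().
 */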
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}
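
/*
 * Program the OMAP2-generation control register: restore the digest count,
 * select SHA1 vs MD5, and set ALGO_CONST/CLOSE_HASH for the first and last
 * iterations respectively.
 */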
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}
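
/* Block size in bytes for the algorithm selected in the request context. */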
static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}
static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
				    u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}

static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);

	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
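
/*
 * PIO transmit path: feed the input scatterlist into the DIN registers one
 * block at a time, polling the IP's input-ready status between blocks.
 * Returns -EINPROGRESS; completion is signalled by the output-ready IRQ.
 */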
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
			SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
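
/*
 * DMA transmit path: map the scatterlist and queue a slave transfer to the
 * DIN FIFO. omap_sham_dma_callback() schedules the done tasklet when the
 * transfer completes.
 */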
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
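
/*
 * Build a fresh scatterlist that prepends the buffered remainder from the
 * previous update (xmit_buf) and trims the source list to @new_len bytes.
 * Used when the entries are aligned for DMA but the list needs reshaping.
 */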
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (offset) {
			offset -= sg->length;
			if (offset < 0)
				offset = 0;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset);
			if (new_len <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
			ctx->sg_len++;
		}

		sg = sg_next(sg);
	}

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->bufcnt = 0;

	return 0;
}
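
/*
 * Fallback for unaligned input: linearize the buffered data plus the
 * source scatterlist into freshly allocated pages and hash from there.
 * The pages are freed in omap_sham_finish_req().
 */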
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs, int new_len)
{
	int pages;
	void *buf;
	int len;

	len = new_len + ctx->bufcnt;

	pages = get_order(ctx->total);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 ctx->total - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);

	ctx->sg_len = 1;

	ctx->bufcnt = 0;
	ctx->offset = 0;

	return 0;
}
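
/*
 * Check whether the source scatterlist can be fed to the IP as-is: every
 * chunk must be 32-bit aligned and a multiple of the block size. Falls
 * back to omap_sham_copy_sgs() or omap_sham_copy_sg_lists() otherwise.
 */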
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;

	if (!sg || !sg->length || !nbytes)
		return 0;

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->sg_len = n;
	rctx->sg = sg;

	return 0;
}
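
/*
 * Split the request into a block-aligned portion to transmit now and a
 * remainder (hash_later) kept in the context buffer for the next update
 * or the final hash.
 */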
static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	int xmit_len, hash_later;

	bs = get_block_size(rctx);

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	rctx->total = nbytes + rctx->bufcnt;

	if (!rctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		nbytes -= len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	xmit_len = rctx->total;

	if (!IS_ALIGNED(xmit_len, bs)) {
		if (final)
			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
		else
			xmit_len = xmit_len / bs * bs;
	} else if (!final) {
		xmit_len -= bs;
	}

	hash_later = rctx->total - xmit_len;
	if (hash_later < 0)
		hash_later = 0;

	if (rctx->bufcnt && nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);

		sg_chain(rctx->sgl, 2, req->src);

		rctx->sg = rctx->sgl;

		rctx->sg_len++;
	} else if (rctx->bufcnt) {
		/* have buffered data only */
		sg_init_table(rctx->sgl, 1);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);

		rctx->sg = rctx->sgl;

		rctx->sg_len = 1;
	}

	if (hash_later) {
		int offset = 0;

		if (hash_later > req->nbytes) {
			memcpy(rctx->buffer, rctx->buffer + xmit_len,
			       hash_later - req->nbytes);
			offset = hash_later - req->nbytes;
		}

		if (req->nbytes) {
			scatterwalk_map_and_copy(rctx->buffer + offset,
						 req->src,
						 offset + req->nbytes -
						 hash_later, hash_later, 0);
		}

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (!final)
		rctx->total = xmit_len;

	return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}
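
/*
 * ahash ->init: pick a device, derive the mode flags from the digest size
 * and, for HMAC on hardware without automatic key processing
 * (no FLAGS_AUTO_XOR), preload the ipad into the buffer.
 */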
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->offset = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = ctx->flags & BIT(FLAGS_FINUP);

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length + ctx->bufcnt));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
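
/*
 * Dequeue and run requests until the queue drains or a request goes
 * asynchronous (-EINPROGRESS). Called from the ahash entry points and
 * from the done tasklet with @req == NULL.
 */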
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
	if (err || !ctx->total)
		goto err1;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	dev_dbg(dd->dev, "exit, err: %d\n", err);

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}
	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}
static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer + offset,
				      ctx->bufcnt - offset, req->result);
}
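
/*
 * ahash ->final: small requests (no digest yet, buffer below fallback_sz)
 * are handed to the software fallback; otherwise the buffered tail is
 * queued as an OP_FINAL, or the ready hash is simply copied out.
 */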
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than fallback_sz, we use fallback
	 * SW encoding, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to clean up resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
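
/*
 * HMAC setkey: hash down oversized keys, then precompute ipad/opad by
 * XORing with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE when the hardware cannot
 * do the key processing itself (no FLAGS_AUTO_XOR).
 */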
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
		      unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct omap_sham_dev *dd = NULL, *tmp;
	int err, i;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return err;
}
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}

	}

	return 0;
}

static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
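
/*
 * export/import serialize the request context (including any buffered
 * partial block) so a partially hashed request can be suspended and
 * resumed.
 */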
static int omap_sham_export(struct ahash_request *req, void *out)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);

	return 0;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	const struct omap_sham_reqctx *ctx_in = in;

	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);

	return 0;
}
static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
2010-05-03 07:10:59 +04:00
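/*
 * Tasklet bottom half. Scheduled from the interrupt handlers once
 * output is ready: it stops any active DMA, propagates a DMA error if
 * one occurred, completes the current request and, when the device has
 * gone idle, dispatches the next queued request.
 */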
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);

	/* If we are not busy, process next req */
	if (!test_bit(FLAGS_BUSY, &dd->flags))
		omap_sham_handle_queue(dd, NULL);
}
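/*
 * Interrupt handling. Both IP revisions funnel into
 * omap_sham_irq_common(), which defers the real work to the done
 * tasklet; the omap2 and omap4 handlers differ only in how the
 * interrupt is acknowledged in the control/mask registers.
 */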
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
			     SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
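/*
 * Per-SoC description of the IP: which algorithm lists to register and
 * where its registers live. OMAP2/3 uses the legacy SHA1/MD5-only
 * layout; the DT-only OMAP4/5 variants follow below.
 */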
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
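/*
 * Everything below is DT-only: the OMAP4 and OMAP5 variants of the IP
 * are always probed via device tree (see omap_sham_get_res_pdev() for
 * the legacy OMAP2/3 path).
 */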
#ifdef CONFIG_OF
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);
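/*
 * DT probe path: resolve the matched compatible entry to its pdata and
 * pull the MMIO region and interrupt out of the device node.
 */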
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else

static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
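/*
 * Legacy (non-DT) probe path: take the MMIO region and IRQ from the
 * platform resources. Only OMAP2/3 boards register the device this
 * way, so the OMAP2 pdata is hard-wired here.
 */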
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}
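/*
 * "fallback" sysfs attribute: requests smaller than dd->fallback_sz
 * are expected to take the software-fallback path rather than the
 * accelerator (the threshold is consulted on the hashing paths outside
 * this excerpt). The attribute sits in the device's sysfs directory,
 * so it can be tuned at runtime, e.g. (exact path depends on how the
 * platform device is named):
 *
 *	echo 16 > /sys/devices/platform/<sham-device>/fallback
 */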
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}
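/*
 * "queue_len" sysfs attribute: caps the length of the driver's
 * internal crypto queue (dd->queue, sized OMAP_SHAM_QUEUE_LENGTH at
 * probe time). Runtime changes are safe; see the comment in
 * queue_len_store().
 */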
static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;
	unsigned long flags;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the new size is
	 * smaller than the current one, the queue simply stops accepting
	 * new entries until it has shrunk enough.
	 */
	spin_lock_irqsave(&dd->lock, flags);
	dd->queue.max_qlen = value;
	spin_unlock_irqrestore(&dd->lock, flags);

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};
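/*
 * Probe: allocate the device state, map the IP and hook up its
 * interrupt, request an "rx" DMA channel (falling back to polling mode
 * if none is available), read the IP revision under runtime PM, then
 * register every ahash algorithm the revision supports plus the sysfs
 * attribute group.
 */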
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;

			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
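/*
 * Remove: the mirror of probe. Drop the device from the global list,
 * unregister every algorithm that made it through registration, kill
 * the done tasklet and release runtime PM and the DMA channel.
 */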
static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	return 0;
}
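/*
 * System sleep piggy-backs on runtime PM: suspend drops the runtime PM
 * reference so the IP can be idled, resume takes it back.
 */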
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");