// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define	SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20
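
/*
 * Hardware descriptor and link-table layouts. Both structures live in
 * DMA-coherent memory and are walked directly by the SAHARA engine, so
 * the pointer fields (p1/p2/p/next) hold DMA addresses, not kernel
 * virtual ones.
 */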
struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};
struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
	struct skcipher_request fallback_req;	// keep at the end
};
/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};
struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};
static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
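
/*
 * Build the header word for the key-load descriptor. The base value has
 * SAHARA_HDR_PARITY_BIT set, and the bit is toggled whenever an extra
 * flag is OR'ed in, presumably to keep the header word's parity
 * constant for the hardware.
 */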
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
	dev_err(dev->device, "\t- %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "\t\t* DMA read.\n");
		else
			dev_err(dev->device, "\t\t* DMA write.\n");

		dev_err(dev->device, "\t\t* %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "\t\t* %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "\t\t* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "\t\t* %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "\t- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "\t\t* Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "\t\t* %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "\t- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "\t- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "\t- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "\t- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "\t- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "\t- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "\t- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "\t- SKHA Active.\n");
	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "\t- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "\t- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "\t- Debug Mode.\n");

	dev_dbg(dev->device, "\t- Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
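
/*
 * Set up the AES descriptor chain. When a new key (and IV for CBC) must
 * be loaded, hw_desc[0] becomes a key descriptor pointing at the key/IV
 * DMA buffers and chains to the data descriptor; otherwise the data
 * descriptor sits at index 0. The data descriptor's p1/p2 point at the
 * input and output link tables built from the mapped scatterlists, and
 * writing hw_phys_desc[0] to the DAR register starts the transfer.
 */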
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}
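
/*
 * Runs in the dispatcher kthread's context: program the descriptor
 * chain for one skcipher request (which also kicks the engine via the
 * DAR register) and sleep until the IRQ handler signals dma_completion
 * or the timeout expires.
 */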
static int sahara_aes_process(struct skcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->cryptlen, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->cryptlen;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->iv)
		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}
static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
}
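
/*
 * Common entry point for all four skcipher operations: record the
 * requested mode in the per-request context, enqueue the request under
 * queue_mutex and wake the dispatcher kthread, which later calls
 * sahara_aes_process(). cryptlen must be a whole number of AES blocks.
 */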
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_encrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(
		crypto_skcipher_reqtfm(req));

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		return crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));

	return 0;
}

static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
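
/*
 * Build the MDHA header word. Unlike the AES path, parity is computed
 * explicitly here: hweight_long() counts the bits already set and
 * SAHARA_HDR_PARITY_BIT is OR'ed in when that count is even.
 */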
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}
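
/*
 * Map the request's input scatterlist for DMA and mirror it into the
 * hardware link table, starting at link index 'start'. Returns the
 * index of the first unused link, which the caller reuses for the
 * context-save link.
 */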
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}
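
/*
 * Trim a scatterlist so that it covers exactly 'nbytes': the entry
 * crossing the boundary is shortened and marked as the end of the
 * list. Returns the number of bytes the list could not accommodate.
 */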
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
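
/*
 * Reshape the request so the hardware only ever sees whole blocks:
 * sub-block tails are stashed in rctx->buf for the next call, and data
 * left over from a previous call is prepended via a chained
 * scatterlist. Only the final transfer may be block-unaligned, since
 * only it can be padded by the hardware. Returns 0 when everything was
 * buffered and there is nothing for the hardware to do yet.
 */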
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		sg_chain(rctx->in_sg_chain, 2, req->src);
		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
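
/*
 * Process one hash request in the dispatcher kthread. For the first
 * chunk a single data descriptor suffices; afterwards a context-load
 * descriptor (#6) is chained in front so the MDHA unit resumes from the
 * previously saved state. The updated context is copied back out and,
 * on the final chunk, the digest is extracted from it.
 */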
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}
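
/*
 * Dispatcher kthread: serializes all AES and SHA requests against the
 * single SAHARA unit. Requests are pulled off the crypto queue under
 * queue_mutex, processed synchronously, then completed; the thread
 * sleeps whenever the queue is empty.
 */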
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct skcipher_request *req =
					skcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "sahara-ecb-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_ecb_encrypt,
	.decrypt		= sahara_aes_ecb_decrypt,
}, {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "sahara-cbc-aes",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
	.base.cra_alignmask	= 0x0,
	.base.cra_module	= THIS_MODULE,

	.init			= sahara_aes_init_tfm,
	.exit			= sahara_aes_exit_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= sahara_aes_setkey,
	.encrypt		= sahara_aes_cbc_encrypt,
	.decrypt		= sahara_aes_cbc_decrypt,
}
};
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
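
/*
 * IRQ handler: acknowledge the interrupt, decode the status word and
 * record any error in dev->error, then wake the dispatcher thread
 * waiting on dma_completion. A still-busy engine means the interrupt
 * was not ours.
 */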
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}
static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}
static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");